Added Firewall Policies Monitoring

Added buffering for writes (execution now 4x faster on my laptop)
Added aligned timestamps per monitored resource type
This commit is contained in:
Maurizio Noseda Pedraglio 2022-10-10 15:53:14 +02:00
parent 82f3daf917
commit c52b623857
10 changed files with 365 additions and 154 deletions

View File

@ -172,3 +172,17 @@ metrics_per_project:
utilization:
name: firewalls_per_project_utilization
description: Number of VPC firewall rules in a project - utilization.
metrics_per_firewall_policy:
firewall_policy_tuples:
usage:
name: firewall_policy_tuples_per_policy_usage
description: Number of tuples in a firewall policy - usage.
limit:
# This limit is not visible through Google APIs, set default_value
name: firewall_policy_tuples_per_policy_limit
description: Number of tuples in a firewall policy - limit.
values:
default_value: 2000
utilization:
name: firewall_policy_tuples_per_policy_utilization
description: Number of tuples in a firewall policy - utilization.

View File

@ -0,0 +1,117 @@
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ast import AnnAssign
import re
import time
from collections import defaultdict
from pydoc import doc
from collections import defaultdict
from google.protobuf import field_mask_pb2
from . import metrics, networks, limits
def get_firewall_policies_dict(config: dict):
  '''
  Calls the Asset Inventory API to get all Firewall Policies under the GCP organization.

  Parameters:
    config (dict): The dict containing config like clients and limits
  Returns:
    firewall_policies_dict (dictionary of dictionary): Keys are policy ids, subkeys are policy field values
  '''
  # Plain dict, not defaultdict(int): every value stored is a per-policy dict,
  # so an int default factory was misleading and never a valid record.
  firewall_policies_dict = {}
  read_mask = field_mask_pb2.FieldMask()
  read_mask.FromJsonString('name,versionedResources')
  response = config["clients"]["asset_client"].search_all_resources(
      request={
          "scope": f"organizations/{config['organization']}",
          "asset_types": ["compute.googleapis.com/FirewallPolicy"],
          "read_mask": read_mask,
      })
  for resource in response:
    for versioned in resource.versioned_resources:
      # Copy the versioned resource's fields into a plain dict keyed by field name.
      firewall_policy = dict(versioned.resource.items())
      firewall_policies_dict[firewall_policy['id']] = firewall_policy
  return firewall_policies_dict
def get_firewal_policies_data(config, metrics_dict, firewall_policies_dict):
  '''
  Buffers usage, limit and utilization time series for tuples per Firewall Policy.

  NOTE(review): function name keeps the original 'firewal' spelling so existing
  callers keep working; consider renaming with a deprecation alias later.

  Parameters:
    config (dict): The dict containing config like clients and limits
    metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions.
    firewall_policies_dict (dictionary of dictionary of string: string): Keys are policy ids, subkeys are policy field values
  Returns:
    None
  '''
  current_tuples_limit = None
  try:
    # The tuples limit is not exposed by Google APIs; it comes from the
    # configured default_value (see the metrics YAML).
    current_tuples_limit = metrics_dict["metrics_per_firewall_policy"][
        "firewall_policy_tuples"]["limit"]["values"]["default_value"]
  except KeyError:
    print(
        "Could not determine number of tuples metric limit due to missing default value"
    )
  # Guard against None before comparing: 'None < 0' raises TypeError in
  # Python 3, which crashed this function whenever the default was missing.
  # Also use <= 0 so the condition matches the message below.
  if current_tuples_limit is not None and current_tuples_limit <= 0:
    print(
        "Could not determine number of tuples metric limit as default value is <= 0"
    )
  # One timestamp for all policies so the series are aligned.
  timestamp = time.time()
  for firewall_policy_key in firewall_policies_dict:
    firewall_policy = firewall_policies_dict[firewall_policy_key]
    # Parent may be an org, a folder, or a project.
    # folder and org require splitting {folder,organization}/<id>;
    # otherwise the project id is extracted from the selfLink.
    parent = re.search(r"(\w+$)", firewall_policy["parent"]).group(
        1) if "parent" in firewall_policy else re.search(
            r"([\d,a-z,-]+)(\/[\d,a-z,-]+\/firewallPolicies/[\d,a-z,-]*$)",
            firewall_policy["selfLink"]).group(1)
    parent_type = re.search(r"(^\w+)", firewall_policy["parent"]).group(
        1) if "parent" in firewall_policy else "projects"
    metric_labels = {'parent': parent, 'parent_type': parent_type}
    # Prefer the human-readable displayName; fall back to the resource name.
    metric_labels["name"] = firewall_policy[
        "displayName"] if "displayName" in firewall_policy else firewall_policy[
            "name"]
    metrics.append_data_to_series_buffer(
        config, metrics_dict["metrics_per_firewall_policy"]
        ["firewall_policy_tuples"]["usage"]["name"],
        firewall_policy['ruleTupleCount'], metric_labels, timestamp=timestamp)
    # Limit/utilization are only meaningful with a positive configured limit.
    if current_tuples_limit is not None and current_tuples_limit > 0:
      metrics.append_data_to_series_buffer(
          config, metrics_dict["metrics_per_firewall_policy"]
          ["firewall_policy_tuples"]["limit"]["name"], current_tuples_limit,
          metric_labels, timestamp=timestamp)
      metrics.append_data_to_series_buffer(
          config, metrics_dict["metrics_per_firewall_policy"]
          ["firewall_policy_tuples"]["utilization"]["name"],
          firewall_policy['ruleTupleCount'] / current_tuples_limit,
          metric_labels, timestamp=timestamp)
  print("Buffered number tuples per Firewall Policy")

View File

@ -14,6 +14,8 @@
# limitations under the License.
#
import time
from collections import defaultdict
from google.protobuf import field_mask_pb2
from . import metrics, networks, limits
@ -75,15 +77,17 @@ def get_forwarding_rules_data(config, metrics_dict, forwarding_rules_dict,
Returns:
None
'''
for project in config["monitored_projects"]:
network_dict = networks.get_networks(config, project)
timestamp = time.time()
for project_id in config["monitored_projects"]:
network_dict = networks.get_networks(config, project_id)
current_quota_limit = limits.get_quota_current_limit(
config, f"projects/{project}", config["limit_names"][layer])
config, f"projects/{project_id}", config["limit_names"][layer])
if current_quota_limit is None:
print(
f"Could not write {layer} forwarding rules to metric for projects/{project} due to missing quotas"
f"Could not determine {layer} forwarding rules to metric for projects/{project_id} due to missing quotas"
)
continue
@ -95,20 +99,24 @@ def get_forwarding_rules_data(config, metrics_dict, forwarding_rules_dict,
usage = 0
if net['self_link'] in forwarding_rules_dict:
usage = forwarding_rules_dict[net['self_link']]
metrics.write_data_to_metric(
config, project, usage, metrics_dict["metrics_per_network"]
metric_labels = {
'project': project_id,
'network_name': net['network_name']
}
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
[f"{layer.lower()}_forwarding_rules_per_network"]["usage"]["name"],
net['network_name'])
metrics.write_data_to_metric(
config, project, net['limit'], metrics_dict["metrics_per_network"]
usage, metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
[f"{layer.lower()}_forwarding_rules_per_network"]["limit"]["name"],
net['network_name'])
metrics.write_data_to_metric(
config, project, usage / net['limit'],
metrics_dict["metrics_per_network"]
net['limit'], metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
[f"{layer.lower()}_forwarding_rules_per_network"]["utilization"]
["name"], net['network_name'])
["name"], usage / net['limit'], metric_labels, timestamp=timestamp)
print(
f"Wrote number of {layer} forwarding rules to metric for projects/{project}"
)
f"Buffered number of {layer} forwarding rules to metric for projects/{project_id}"
)

View File

@ -14,6 +14,8 @@
# limitations under the License.
#
import time
from code import interact
from collections import defaultdict
from . import metrics, networks, limits
@ -61,15 +63,16 @@ def get_gce_instances_data(config, metrics_dict, gce_instance_dict, limit_dict):
Returns:
gce_instance_dict
'''
for project in config["monitored_projects"]:
network_dict = networks.get_networks(config, project)
timestamp = time.time()
for project_id in config["monitored_projects"]:
network_dict = networks.get_networks(config, project_id)
current_quota_limit = limits.get_quota_current_limit(
config, f"projects/{project}", config["limit_names"]["GCE_INSTANCES"])
config, f"projects/{project_id}",
config["limit_names"]["GCE_INSTANCES"])
if current_quota_limit is None:
print(
f"Could not write number of instances for projects/{project} due to missing quotas"
f"Could not determine number of instances for projects/{project_id} due to missing quotas"
)
current_quota_limit_view = metrics.customize_quota_view(current_quota_limit)
@ -81,15 +84,19 @@ def get_gce_instances_data(config, metrics_dict, gce_instance_dict, limit_dict):
if net['self_link'] in gce_instance_dict:
usage = gce_instance_dict[net['self_link']]
metrics.write_data_to_metric(
config, project, usage, metrics_dict["metrics_per_network"]
["instance_per_network"]["usage"]["name"], net['network_name'])
metrics.write_data_to_metric(
config, project, net['limit'], metrics_dict["metrics_per_network"]
["instance_per_network"]["limit"]["name"], net['network_name'])
metrics.write_data_to_metric(
config, project, usage / net['limit'],
metrics_dict["metrics_per_network"]["instance_per_network"]
["utilization"]["name"], net['network_name'])
metric_labels = {
'project': project_id,
'network_name': net['network_name']
}
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]["instance_per_network"]
["usage"]["name"], usage, metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]["instance_per_network"]
["limit"]["name"], net['limit'], metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]["instance_per_network"]
["utilization"]["name"], usage / net['limit'], metric_labels,
timestamp=timestamp)
print(f"Wrote number of instances to metric for projects/{project}")
print(f"Buffered number of instances to metric for projects/{project_id}")

View File

@ -14,6 +14,8 @@
# limitations under the License.
#
import time
from google.api_core import exceptions
from google.cloud import monitoring_v3
from . import metrics
@ -173,6 +175,8 @@ def count_effective_limit(config, project_id, network_dict, usage_metric_name,
None
'''
timestamp = time.time()
if network_dict['peerings'] == []:
return
@ -215,11 +219,16 @@ def count_effective_limit(config, project_id, network_dict, usage_metric_name,
# Calculates effective limit: Step 4: Find maximum from step 1 and step 3
effective_limit = max(limit_step1, limit_step3)
utilization = peering_group_usage / effective_limit
metrics.write_data_to_metric(config, project_id, peering_group_usage,
usage_metric_name, network_dict['network_name'])
metrics.write_data_to_metric(config, project_id, effective_limit,
limit_metric_name, network_dict['network_name'])
metrics.write_data_to_metric(config, project_id, utilization,
utilization_metric_name,
network_dict['network_name'])
metric_labels = {
'project': project_id,
'network_name': network_dict['network_name']
}
metrics.append_data_to_series_buffer(config, usage_metric_name,
peering_group_usage, metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(config, limit_metric_name,
effective_limit, metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(config, utilization_metric_name,
utilization, metric_labels,
timestamp=timestamp)

View File

@ -14,12 +14,16 @@
# limitations under the License.
#
from curses import KEY_MARK
import re
import time
import yaml
from google.api import metric_pb2 as ga_metric
from google.cloud import monitoring_v3
from . import peerings, limits, networks
BUFFER_LEN = 10
def create_metrics(monitoring_project):
'''
@ -84,35 +88,32 @@ def create_metric(metric_name, description, monitoring_project):
print("Created {}.".format(descriptor.name))
def write_data_to_metric(config, monitored_project_id, value, metric_name,
network_name=None, subnet_id=None):
def append_data_to_series_buffer(config, metric_name, metric_value,
metric_labels, timestamp=None):
'''
Writes data to Cloud Monitoring custom metrics.
Parameters:
config (dict): The dict containing config like clients and limits
monitored_project_id: ID of the project where the resource lives (will be added as a label)
value (int): Value for the data point of the metric.
metric_name (string): Name of the metric
network_name (string): Name of the network (will be added as a label)
subnet_id (string): Identifier of the Subnet (region/name of the subnet)
metric_value (int): Value for the data point of the metric.
metric_labels (dictionary of string: string): metric label names and values
timestamp (float): seconds since the epoch, in UTC
Returns:
usage (int): Current usage for that network.
limit (int): Current usage for that network.
'''
client = monitoring_v3.MetricServiceClient()
series = monitoring_v3.TimeSeries()
series.metric.type = f"custom.googleapis.com/{metric_name}"
series.resource.type = "global"
series.metric.labels["project"] = monitored_project_id
if network_name != None:
series.metric.labels["network_name"] = network_name
if subnet_id != None:
series.metric.labels["subnet_id"] = subnet_id
now = time.time()
seconds = int(now)
nanos = int((now - seconds) * 10**9)
for label_name in metric_labels:
if (metric_labels[label_name] != None):
series.metric.labels[label_name] = metric_labels[label_name]
timestamp = timestamp if timestamp != None else time.time()
seconds = int(timestamp)
nanos = int((timestamp - seconds) * 10**9)
interval = monitoring_v3.TimeInterval(
{"end_time": {
"seconds": seconds,
@ -121,20 +122,39 @@ def write_data_to_metric(config, monitored_project_id, value, metric_name,
point = monitoring_v3.Point({
"interval": interval,
"value": {
"double_value": value
"double_value": metric_value
}
})
series.points = [point]
# TODO: sometimes this cashes with 'DeadlineExceeded: 504 Deadline expired before operation could complete' error
# Implement exponential backoff retries?
config["series_buffer"].append(series)
if len(config["series_buffer"]) >= BUFFER_LEN:
flush_series_buffer(config)
def flush_series_buffer(config):
'''
writes buffered metrics to Google Cloud Monitoring, empties buffer upon failure
config (dict): The dict containing config like clients and limits
'''
try:
client.create_time_series(name=config["monitoring_project_link"],
time_series=[series])
if config["series_buffer"] and len(config["series_buffer"]) > 0:
client = monitoring_v3.MetricServiceClient()
client.create_time_series(name=config["monitoring_project_link"],
time_series=config["series_buffer"])
series_names = [
re.search("\/(.+$)", series.metric.type).group(1)
for series in config["series_buffer"]
]
print("Wrote time series: ", series_names)
except Exception as e:
print("Error while writing data point for metric", metric_name)
print("Error while flushing series buffer")
print(e)
config["series_buffer"] = []
def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict):
'''
@ -148,18 +168,18 @@ def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict):
Returns:
None
'''
for project in config["monitored_projects"]:
network_dict_list = peerings.gather_peering_data(config, project)
for project_id in config["monitored_projects"]:
network_dict_list = peerings.gather_peering_data(config, project_id)
# Network dict list is a list of dictionary (one for each network)
# For each network, this dictionary contains:
# project_id, network_name, network_id, usage, limit, peerings (list of peered networks)
# peerings is a list of dictionary (one for each peered network) and contains:
# project_id, network_name, network_id
current_quota_limit = limits.get_quota_current_limit(
config, f"projects/{project}", limit_metric)
config, f"projects/{project_id}", limit_metric)
if current_quota_limit is None:
print(
f"Could not write number of L7 forwarding rules to metric for projects/{project} due to missing quotas"
f"Could not determine number of L7 forwarding rules to metric for projects/{project_id} due to missing quotas"
)
continue
@ -169,10 +189,10 @@ def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict):
for network_dict in network_dict_list:
if network_dict['network_id'] == 0:
print(
f"Could not write {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project} due to missing permissions."
f"Could not determine {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project_id} due to missing permissions."
)
continue
network_link = f"https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network_dict['network_name']}"
network_link = f"https://www.googleapis.com/compute/v1/projects/{project_id}/global/networks/{network_dict['network_name']}"
limit = networks.get_limit_network(network_dict, network_link,
current_quota_limit_view, limit_dict)
@ -197,7 +217,7 @@ def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict):
limit_metric)
if current_peered_quota_limit is None:
print(
f"Could not write metrics for peering to projects/{peered_network_dict['project_id']} due to missing quotas"
f"Could not determine metrics for peering to projects/{peered_network_dict['project_id']} due to missing quotas"
)
continue
@ -211,13 +231,13 @@ def get_pgg_data(config, metric_dict, usage_dict, limit_metric, limit_dict):
peered_network_dict["usage"] = peered_usage
peered_network_dict["limit"] = peered_limit
limits.count_effective_limit(config, project, network_dict,
limits.count_effective_limit(config, project_id, network_dict,
metric_dict["usage"]["name"],
metric_dict["limit"]["name"],
metric_dict["utilization"]["name"],
limit_dict)
print(
f"Wrote {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project}"
f"Wrote {metric_dict['usage']['name']} for peering group {network_dict['network_name']} in {project_id}"
)

View File

@ -14,6 +14,8 @@
# limitations under the License.
#
import time
from . import metrics, networks, limits
@ -28,40 +30,53 @@ def get_vpc_peering_data(config, metrics_dict, limit_dict):
Returns:
None
'''
timestamp = time.time()
for project in config["monitored_projects"]:
active_vpc_peerings, vpc_peerings = gather_vpc_peerings_data(
config, project, limit_dict)
for peering in active_vpc_peerings:
metrics.write_data_to_metric(
config, project, peering['active_peerings'],
metrics_dict["metrics_per_network"]["vpc_peering_active_per_network"]
["usage"]["name"], peering['network_name'])
metrics.write_data_to_metric(
config, project, peering['network_limit'],
metrics_dict["metrics_per_network"]["vpc_peering_active_per_network"]
["limit"]["name"], peering['network_name'])
metrics.write_data_to_metric(
config, project,
peering['active_peerings'] / peering['network_limit'],
metrics_dict["metrics_per_network"]["vpc_peering_active_per_network"]
["utilization"]["name"], peering['network_name'])
print("Wrote number of active VPC peerings to custom metric for project:",
project)
metric_labels = {
'project': project,
'network_name': peering['network_name']
}
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
["vpc_peering_active_per_network"]["usage"]["name"],
peering['active_peerings'], metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
["vpc_peering_active_per_network"]["limit"]["name"],
peering['network_limit'], metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
["vpc_peering_active_per_network"]["utilization"]["name"],
peering['active_peerings'] / peering['network_limit'], metric_labels,
timestamp=timestamp)
print(
"Buffered number of active VPC peerings to custom metric for project:",
project)
for peering in vpc_peerings:
metrics.write_data_to_metric(
config, project, peering['peerings'],
metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["usage"]["name"], peering['network_name'])
metrics.write_data_to_metric(
config, project, peering['network_limit'],
metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["limit"]["name"], peering['network_name'])
metrics.write_data_to_metric(
config, project, peering['peerings'] / peering['network_limit'],
metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["utilization"]["name"], peering['network_name'])
print("Wrote number of VPC peerings to custom metric for project:", project)
metric_labels = {
'project': project,
'network_name': peering['network_name']
}
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["usage"]["name"], peering['peerings'], metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["limit"]["name"], peering['network_limit'], metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]["vpc_peering_per_network"]
["utilization"]["name"],
peering['peerings'] / peering['network_limit'], metric_labels,
timestamp=timestamp)
print("Buffered number of VPC peerings to custom metric for project:",
project)
def gather_peering_data(config, project_id):

View File

@ -14,6 +14,8 @@
# limitations under the License.
#
import time
from collections import defaultdict
from . import metrics, networks, limits, peerings, routers
@ -88,16 +90,17 @@ def get_dynamic_routes(config, metrics_dict, limits_dict):
routers_dict = routers.get_routers(config)
dynamic_routes_dict = defaultdict(int)
for project_id in config["monitored_projects"]:
network_dict = networks.get_networks(config, project_id)
timestamp = time.time()
for project in config["monitored_projects"]:
network_dict = networks.get_networks(config, project)
for network in network_dict:
sum_routes = get_routes_for_network(config, network['self_link'],
project_id, routers_dict)
dynamic_routes_dict[network['self_link']] = sum_routes
for net in network_dict:
sum_routes = get_routes_for_network(config, net['self_link'], project,
routers_dict)
dynamic_routes_dict[net['self_link']] = sum_routes
if network['self_link'] in limits_dict:
limit = limits_dict[network['self_link']]
if net['self_link'] in limits_dict:
limit = limits_dict[net['self_link']]
else:
if 'default_value' in limits_dict:
limit = limits_dict['default_value']
@ -106,21 +109,21 @@ def get_dynamic_routes(config, metrics_dict, limits_dict):
break
utilization = sum_routes / limit
metric_labels = {'project': project, 'network_name': net['network_name']}
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
["dynamic_routes_per_network"]["usage"]["name"], sum_routes,
metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
["dynamic_routes_per_network"]["limit"]["name"], limit, metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_network"]
["dynamic_routes_per_network"]["utilization"]["name"], utilization,
metric_labels, timestamp=timestamp)
metrics.write_data_to_metric(
config, project_id, sum_routes, metrics_dict["metrics_per_network"]
["dynamic_routes_per_network"]["usage"]["name"],
network['network_name'])
metrics.write_data_to_metric(
config, project_id, limit, metrics_dict["metrics_per_network"]
["dynamic_routes_per_network"]["limit"]["name"],
network['network_name'])
metrics.write_data_to_metric(
config, project_id, utilization, metrics_dict["metrics_per_network"]
["dynamic_routes_per_network"]["utilization"]["name"],
network['network_name'])
print("Wrote metrics for dynamic routes for VPCs in project", project_id)
print("Buffered metrics for dynamic routes for VPCs in project", project)
return dynamic_routes_dict

View File

@ -14,6 +14,8 @@
# limitations under the License.
#
import time
from . import metrics
from google.protobuf import field_mask_pb2
from google.protobuf.json_format import MessageToDict
@ -225,6 +227,7 @@ def get_subnets(config, metrics_dict):
# Updates all_subnets_dict with the IP utilization info
compute_subnet_utilization(config, all_subnets_dict)
timestamp = time.time()
for project_id in config["monitored_projects"]:
if project_id not in all_subnets_dict:
continue
@ -236,18 +239,23 @@ def get_subnets(config, metrics_dict):
# Building unique identifier with subnet region/name
subnet_id = f"{subnet_dict['region']}/{subnet_dict['name']}"
metrics.write_data_to_metric(
config, project_id, subnet_dict['used_ip_addresses'],
metrics_dict["metrics_per_subnet"]["ip_usage_per_subnet"]["usage"]
["name"], subnet_dict['network_name'], subnet_id)
metrics.write_data_to_metric(
config, project_id, subnet_dict['total_ip_addresses'],
metrics_dict["metrics_per_subnet"]["ip_usage_per_subnet"]["limit"]
["name"], subnet_dict['network_name'], subnet_id)
metrics.write_data_to_metric(
config, project_id, ip_utilization, metrics_dict["metrics_per_subnet"]
["ip_usage_per_subnet"]["utilization"]["name"],
subnet_dict['network_name'], subnet_id)
metric_labels = {
'project': project_id,
'network_name': subnet_dict['network_name'],
'subnet_id': subnet_id
}
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_subnet"]["ip_usage_per_subnet"]
["usage"]["name"], subnet_dict['used_ip_addresses'], metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_subnet"]["ip_usage_per_subnet"]
["limit"]["name"], subnet_dict['total_ip_addresses'], metric_labels,
timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_subnet"]["ip_usage_per_subnet"]
["utilization"]["name"], ip_utilization, metric_labels,
timestamp=timestamp)
print("Wrote metrics for subnet ip utilization for VPCs in project",
print("Buffered metrics for subnet ip utilization for VPCs in project",
project_id)

View File

@ -15,11 +15,13 @@
#
import re
import time
from collections import defaultdict
from pydoc import doc
from collections import defaultdict
from google.protobuf import field_mask_pb2
from . import metrics, networks, limits, peerings, routers
from . import metrics, networks, limits
def get_firewalls_dict(config: dict):
@ -69,43 +71,51 @@ def get_firewalls_data(config, metrics_dict, project_quotas_dict,
Parameters:
config (dict): The dict containing config like clients and limits
metrics_dict (dictionary of dictionary of string: string): metrics names and descriptions.
limit_dict (dictionary of string:int): Dictionary with the network link as key and the limit as value.
firewalls_dict (dictionary of dictionary): Keys are projects, subkeys are networks, values count #of VPC Firewall Rules
project_quotas_dict (dictionary of string:int): Dictionary with the network link as key and the limit as value.
firewalls_dict (dictionary of of dictionary of string: string): Keys are projects, subkeys are networks, values count #of VPC Firewall Rules
Returns:
None
'''
for project in config["monitored_projects"]:
current_quota_limit = project_quotas_dict[project]['global']["firewalls"]
timestamp = time.time()
for project_id in config["monitored_projects"]:
current_quota_limit = project_quotas_dict[project_id]['global']["firewalls"]
if current_quota_limit is None:
print(
f"Could not write VPC firewal rules to metric for projects/{project} due to missing quotas"
f"Could not determine VPC firewal rules to metric for projects/{project_id} due to missing quotas"
)
continue
network_dict = networks.get_networks(config, project)
network_dict = networks.get_networks(config, project_id)
project_usage = 0
for net in network_dict:
usage = 0
if project in firewalls_dict and net['network_name'] in firewalls_dict[
project]:
usage = firewalls_dict[project][net['network_name']]
if project_id in firewalls_dict and net['network_name'] in firewalls_dict[
project_id]:
usage = firewalls_dict[project_id][net['network_name']]
project_usage += usage
metrics.write_data_to_metric(
config, project, usage,
metric_labels = {
'project': project_id,
'network_name': net['network_name']
}
metrics.append_data_to_series_buffer(
config,
metrics_dict["metrics_per_project"][f"firewalls"]["usage"]["name"],
net['network_name'])
usage, metric_labels, timestamp=timestamp)
metric_labels = {'project': project_id}
# firewall quotas are per project, not per single VPC
metrics.write_data_to_metric(
config, project, current_quota_limit['limit'],
metrics_dict["metrics_per_project"][f"firewalls"]["limit"]["name"])
metrics.write_data_to_metric(
config, project, project_usage / current_quota_limit['limit']
if current_quota_limit['limit'] != 0 else 0,
metrics_dict["metrics_per_project"][f"firewalls"]["utilization"]
["name"])
metrics.append_data_to_series_buffer(
config,
metrics_dict["metrics_per_project"][f"firewalls"]["limit"]["name"],
current_quota_limit['limit'], metric_labels, timestamp=timestamp)
metrics.append_data_to_series_buffer(
config, metrics_dict["metrics_per_project"][f"firewalls"]["utilization"]
["name"], project_usage / current_quota_limit['limit']
if current_quota_limit['limit'] != 0 else 0, metric_labels,
timestamp=timestamp)
print(
f"Wrote number of VPC Firewall Rules to metric for projects/{project}")
f"Buffered number of VPC Firewall Rules to metric for projects/{project_id}"
)