Updating network dashboard: fixing Cloud SQL problem, fixing 1 metric… (#1806)

* Updating network dashboard: fixing the Cloud SQL discovery problem, fixing a metric filter issue in the dashboard, pausing between requests to avoid monitoring quota issues, and, when monitored folders and projects are empty, defaulting to monitoring every project under the discovery root node.

* formatting

* time optimization

---------

Co-authored-by: Ludovico Magnocavallo <ludomagno@google.com>
Aurélien Legrand, 2023-10-25 12:37:25 +02:00
parent 33ce0e1db5
commit e10aabdc22
5 changed files with 24 additions and 5 deletions

View File

@@ -59,7 +59,7 @@
 "alignmentPeriod": "3600s",
 "perSeriesAligner": "ALIGN_NEXT_OLDER"
 },
-"filter": "metric.type=\"custom.googleapis.com/netmon/network/forwarding_rules_l4_used_ratio\" resource.type=\"global\"",
+"filter": "metric.type=\"custom.googleapis.com/netmon/network/forwarding_rules_l7_used_ratio\" resource.type=\"global\"",
 "secondaryAggregation": {
 "alignmentPeriod": "60s",
 "perSeriesAligner": "ALIGN_MEAN"

View File

@@ -47,7 +47,7 @@ module "pubsub" {
   project_id    = module.project.project_id
   name          = var.name
   regions       = [var.region]
-  subscriptions = { "${var.name}-default" = null }
+  subscriptions = {}
 }

 module "cloud-function" {

View File

@@ -69,6 +69,13 @@ def start_discovery(resources, response=None, data=None):
   LOGGER.info(f'discovery (has response: {response is not None})')
   if response is None:
     # return initial discovery URLs
+    if not resources['config:folders'] and not resources['config:projects']:
+      LOGGER.info(
+          f'No monitored project or folder given, defaulting to discovery root: {resources["config:discovery_root"]}'
+      )
+      dr_node, dr_value = resources['config:discovery_root'].split('/')
+      yield HTTPRequest(CAI_URL.format(f'{dr_node}/{dr_value}'), {}, None)
     for v in resources['config:folders']:
       yield HTTPRequest(CAI_URL.format(f'folders/{v}'), {}, None)
     for v in resources['config:projects']:
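
The new fallback issues a single CAI request for the discovery root when no folders or projects are configured. A standalone sketch of the same splitting logic, where CAI_URL and the resources mapping are simplified stand-ins for the plugin's own values:

    # Stand-ins for the plugin's CAI_URL template and configuration.
    CAI_URL = 'https://content-cloudasset.googleapis.com/v1/{}/assets'
    resources = {
        'config:folders': [],
        'config:projects': [],
        'config:discovery_root': 'organizations/1234567890',
    }

    if not resources['config:folders'] and not resources['config:projects']:
      # 'organizations/1234567890' -> ('organizations', '1234567890')
      dr_node, dr_value = resources['config:discovery_root'].split('/')
      print(CAI_URL.format(f'{dr_node}/{dr_value}'))
      # https://content-cloudasset.googleapis.com/v1/organizations/1234567890/assets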

View File

@@ -216,11 +216,13 @@ def _handle_sql_instances(resource, data):
       'name': data['name'],
       'self_link': _self_link(data['selfLink']),
       'ipAddresses': [
-          i['ipAddress'] for i in data['ipAddresses'] if i['type'] == 'PRIVATE'
+          i['ipAddress']
+          for i in data.get('ipAddresses', [])
+          if i['type'] == 'PRIVATE'
       ],
       'region': data['region'],
       'availabilityType': data['settings']['availabilityType'],
-      'network': data['settings']['ipConfiguration']['privateNetwork']
+      'network': data['settings']['ipConfiguration'].get('privateNetwork')
   }
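
The switch to .get makes the handler tolerant of Cloud SQL instances with no IP addresses and no private network, which previously raised KeyError. A trimmed stand-in for the handler shows the effect on an invented payload:

    # Trimmed stand-in for _handle_sql_instances; the sample payload is
    # invented and describes an instance with no private networking.
    def parse_sql_instance(data):
      return {
          'name': data['name'],
          'ipAddresses': [
              i['ipAddress']
              for i in data.get('ipAddresses', [])
              if i['type'] == 'PRIVATE'
          ],
          'network': data['settings']['ipConfiguration'].get('privateNetwork'),
      }

    sample = {
        'name': 'public-only-instance',
        'settings': {'ipConfiguration': {}},  # no privateNetwork key
    }
    print(parse_sql_instance(sample))
    # {'name': 'public-only-instance', 'ipAddresses': [], 'network': None}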

View File

@@ -17,9 +17,9 @@ import collections
 import datetime
 import json
 import logging
+import time
 from . import HTTPRequest
 from .utils import batched

 DESCRIPTOR_TYPE_BASE = 'custom.googleapis.com/{}'
 DESCRIPTOR_URL = ('https://content-monitoring.googleapis.com/v3'
@@ -74,6 +74,7 @@ def timeseries_requests(project_id, root, timeseries, descriptors):
     bucket.append(ts)
   LOGGER.info(f'metric types {list(ts_buckets.keys())}')
   ts_buckets = list(ts_buckets.values())
+  api_calls, t = 0, time.time()
   while ts_buckets:
     data = {'timeSeries': []}
     for bucket in ts_buckets:
@@ -103,4 +104,13 @@ def timeseries_requests(project_id, root, timeseries, descriptors):
     tot_num = sum(len(b) for b in ts_buckets)
     LOGGER.info(f'sending {req_num} remaining: {tot_num}')
     yield HTTPRequest(url, HEADERS, json.dumps(data))
+    api_calls += 1
+    # Default quota is 180 requests per minute per user
+    if api_calls >= 170:
+      td = time.time() - t
+      if td < 60:
+        LOGGER.info(
+            f'Pausing for {round(60 - td)}s to avoid monitoring quota issues')
+        time.sleep(60 - td)
+      api_calls, t = 0, time.time()
     ts_buckets = [b for b in ts_buckets if b]
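
The pacing added here is a fixed-window rate limiter: count requests and, once the count nears the per-minute quota before the window has elapsed, sleep out the remainder and reset. The same pattern in isolation, as a generic generator whose 170-call/60-second defaults mirror the values above:

    import time

    def rate_limited(items, max_calls=170, window=60.0):
      # Yield items, releasing at most max_calls per window seconds.
      calls, start = 0, time.time()
      for item in items:
        yield item
        calls += 1
        if calls >= max_calls:
          elapsed = time.time() - start
          if elapsed < window:
            time.sleep(window - elapsed)
          calls, start = 0, time.time()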