#!/usr/bin/env python2
# ver 1.2 CL2020
from flask import Flask
from flask import Response
import requests
from requests.auth import HTTPBasicAuth
import json
import time
from gevent.pywsgi import WSGIServer
#
import logging
import logging.handlers
#
import hx_creds
'''
The hx_creds.py file looks like this:
hosts=[{'host':'10.1.1.1', 'username':'local/root', 'password':'*******'},{'host':'10.1.1.2', 'username':'local/root', 'password':'****'}]
'''
#
server_IP = '10.100.252.13'
server_port = '8082'
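
# The /metrics output below uses the Prometheus text exposition style (metric{label="..."} value),
# so it can be scraped directly. A minimal scrape_config sketch, assuming the IP/port above and an
# illustrative job name and interval (these are not part of this script):
#
#   scrape_configs:
#     - job_name: 'hx_stats'
#       scrape_interval: 60s
#       static_configs:
#         - targets: ['10.100.252.13:8082']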
# Logging config
logFile = "hx_stats_%s_%s.log" % (server_IP, server_port)
logCount = 4
logBytes = 1048576
# suppress the unverified request messages (when using self-signed certificates)
requests.packages.urllib3.disable_warnings()
# cache the credentials; if you keep requesting this you will hit the 256 open sessions/user limit
tokens = {}
app = Flask('HX Stats')
# gets called via http://your_server_ip:port/metrics
@app.route('/metrics')
def metrics():
    results = ''
    for host in hx_creds.hosts:
        logger.info("----------- Processing Host: %s -----------" % host['host'])
        url = "https://" + host['host']

        # uri for throughput data with "last 5 min" filter
        uri_MBps = '/stats?target=stats.counters.scvmclient.allhosts.nfsBytesRead.cluster.rate&target=stats.counters.scvmclient.allhosts.nfsBytesWritten.cluster.rate&format=json&from=-5min'

        # Get throughput data
        MBps_data = get_stats(host['username'], host['password'], url + uri_MBps)
        if MBps_data:
            try:
                MBps_Read = round(MBps_data[0]['datapoints'][-2][0], 3)
                MBps_Write = round(MBps_data[1]['datapoints'][-2][0], 3)
                logger.info("Got MBps info")

                # build the results
                results += 'MBps_Read{host="%s"} %s\n' % (host['host'], str(MBps_Read))
                results += 'MBps_Write{host="%s"} %s\n' % (host['host'], str(MBps_Write))
            except Exception as e:
                logger.error(e)
                logger.error("Couldn't parse returned throughput data")
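
        # Note: get_stats() returns the Graphite-style JSON that format=json produces; the indexing
        # above assumes a response shaped roughly like this (an illustrative sketch, not captured output):
        #   [{"target": "...nfsBytesRead...",    "datapoints": [[value, timestamp], ...]},
        #    {"target": "...nfsBytesWritten...", "datapoints": [[value, timestamp], ...]}]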

        # url to get the IOPS data
        uri_IOPS = '/stats?target=stats.counters.scvmclient.allhosts.nfsReads.cluster.rate&target=stats.counters.scvmclient.allhosts.nfsWrites.cluster.rate&format=json&from=-5min'

        # get IOPS data
        IOPS_data = get_stats(host['username'], host['password'], url + uri_IOPS)
        if IOPS_data:
            try:
                IOPS_Read = round(IOPS_data[0]['datapoints'][-2][0], 3)
                IOPS_Write = round(IOPS_data[1]['datapoints'][-2][0], 3)
                logger.info("Got IOPS info")

                # build the results
                results += 'IOPS_Read{host="%s"} %s\n' % (host['host'], str(IOPS_Read))
                results += 'IOPS_Write{host="%s"} %s\n' % (host['host'], str(IOPS_Write))
            except Exception as e:
                logger.error(e)
                logger.error("Couldn't parse returned IOPS data")

        # url to get Latency data
        uri_Lat = '/stats?target=divideSeries(stats.timers.scvmclient.allhosts.nfsReadLatency.cluster.total%2Cstats.counters.scvmclient.allhosts.nfsReads.cluster.count)&target=divideSeries(stats.timers.scvmclient.allhosts.nfsWriteLatency.cluster.total%2Cstats.counters.scvmclient.allhosts.nfsWrites.cluster.count)&format=json&from=-5min'

        # get latency data
        Lat_data = get_stats(host['username'], host['password'], url + uri_Lat)
        if Lat_data:
            try:
                Lat_Read = round(Lat_data[0]['datapoints'][-2][0], 3)
                Lat_Write = round(Lat_data[1]['datapoints'][-2][0], 3)
                logger.info("Got Latency info")

                # build the results
                results += 'Lat_Read{host="%s"} %s\n' % (host['host'], str(Lat_Read))
                results += 'Lat_Write{host="%s"} %s\n' % (host['host'], str(Lat_Write))
            except Exception as e:
                logger.error(e)
                logger.error("Couldn't parse returned latency data")

        # Note: when processing the datapoints I take the one-before-last record ([-2]),
        # as the last record is sometimes None.

    # return the results to the caller
    logger.info("----------- Finished -----------")
    return Response(results, mimetype='text/plain')
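
# The endpoint returns plain-text metrics built from the format strings above, one line per value,
# e.g. (host and numbers are illustrative):
#   MBps_Read{host="10.1.1.1"} 123.456
#   MBps_Write{host="10.1.1.1"} 98.765
#   IOPS_Read{host="10.1.1.1"} 1500.0
#   IOPS_Write{host="10.1.1.1"} 820.0
#   Lat_Read{host="10.1.1.1"} 1.234
#   Lat_Write{host="10.1.1.1"} 2.345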
#
# calls HX API
#
def get_stats(username, password, url):
    logger.info("call for get_stats")
    try:
        headers = {'Connection': 'close'}

        #logger.info("username: %s" % username)
        #logger.info("pass: %s" % password)
        #logger.info("url: %s" % url)
        #logger.info("headers: %s" % headers)

        response = requests.get(url, auth=HTTPBasicAuth(username, password), headers=headers, verify=False, timeout=4)

        #logger.info("status code: %s" % str(response.status_code))

        if response.status_code == 200:
            logger.info("Got data ok")
            return response.json()
        logger.error("Failed to get data " + response.content)
        return None
    except Exception as e:
        logger.error("Request for data failed \n" + str(e))
        return None
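
# A hypothetical direct call to the helper above (host, credentials and counter are made-up examples):
#   data = get_stats('local/root', 'secret',
#                    'https://10.1.1.1/stats?target=stats.counters.scvmclient.allhosts.nfsReads.cluster.rate&format=json&from=-5min')
#   if data:
#       print data[0]['datapoints'][-2][0]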
if __name__ == '__main__':
    print "Service Started"

    # Enable logging
    logger = logging.getLogger("HX-Stats")
    logger.setLevel(logging.DEBUG)
    handler = logging.handlers.RotatingFileHandler(logFile, maxBytes=logBytes, backupCount=logCount)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.info("-" * 25)
    logger.info("HX Stats script started")

    http_server = WSGIServer((server_IP, int(server_port)), app, log=logger)
    http_server.serve_forever()
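
# Quick sanity check once the service is running (IP/port come from the config at the top of this file):
#   curl http://10.100.252.13:8082/metrics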