diff --git a/README.md b/README.md
index b00c3b9..8f2f845 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,35 @@
-python-haproxy
+python-haproxy collectd plugin
 ==============
-Read haproxy stats using socket.
+
+Based on python-haproxy: `git clone https://github.com/nl5887/python-haproxy.git`
+
+Reads HAProxy stats from the stats socket.
+Before using it with collectd, replace `import collectd_mockup as collectd` with `import collectd` and comment out the `read_callback()` call at the bottom of the plugin (it is only there for standalone debugging).
+
+Data returned to collectd looks like:
+```
+instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_2@test_sql_r36/qcur = 0
+instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_1@be_test_sql_r37/qcur = 0
+instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_1@test_sql_r36/qcur = 0
+instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_2@be_test_sql_r37/qcur = 0
+instance/type/type_instance: master-prod/BE_test_sql_r36/qcur = 0
+instance/type/type_instance: master-prod/FE_test_r36/qcur =
+instance/type/type_instance: master-prod/FE_fe_test_r37/qcur =
+```
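+
+For a quick standalone check outside of collectd, the same parsing the plugin does can be exercised directly. This is a rough sketch, not required for the plugin itself; it assumes the repository root is the working directory and reuses the socket path hard-coded in haproxy-plugin.py:
+
+```python
+# sketch: assumes the repo root is on the import path and the HAProxy
+# stats socket lives at the same path haproxy-plugin.py uses
+from haproxy import haproxy
+from tools.data_list import data_gathered, data_stat_index
+
+stats = haproxy.HAProxyStats('/var/run/haproxy/haproxy.sock')
+for line in stats.execute('show stat'):
+    # same filtering that read_callback() applies
+    if not line or line.startswith('#') or line.startswith('stats'):
+        continue
+    row = line.split(',')
+    for field in data_gathered['stat']:
+        if row[data_stat_index[field]]:
+            print "%s/%s/%s = %s" % (row[0], row[1], field, row[data_stat_index[field]])
+```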
+
+
+# Info from python-haproxy
 
 ## Install
 There are a few different ways you can install pyechonest:
diff --git a/haproxy-plugin.py b/haproxy-plugin.py
new file mode 100755
index 0000000..05ca186
--- /dev/null
+++ b/haproxy-plugin.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+
+import sys
+import os
+import commands
+import socket
+import time
+
+try:
+    import collectd
+except ImportError:
+    import tools.collectd_mockup as collectd
+
+from haproxy import haproxy
+from tools.data_list import data_gathered
+from tools.data_list import data_stat_index
+
+
+PLUGIN_NAME = os.path.basename(__file__).split(".")[0]
+stats = haproxy.HAProxyStats('/var/run/haproxy/haproxy.sock')
+VERBOSE_LOGGING = True
+
+
+# convert a list of "key: value" lines into a dict[key] = value
+def str2dict(arg):
+    rez = {}
+    for x in arg:
+        if x:
+            rez[(x.split(':')[0]).strip()] = x.split(':')[1].strip()
+
+    return rez
+
+
+def log_verbose(msg):
+    if not VERBOSE_LOGGING:
+        return
+    collectd.info('haproxy plugin [verbose]: %s' % msg)
+
+
+# read callback function for the collectd daemon
+def read_callback(data=None):
+
+    metric = collectd.Values()
+
+    #
+    # get HAProxy "show info" data
+    #
+    rez = str2dict(stats.execute('show info'))
+    instance = rez.get('node') or socket.gethostname()
+    # loop over the keys that we need
+    for i in data_gathered['info']:
+
+        metric.plugin = PLUGIN_NAME
+        metric.interval = 60
+        metric.plugin_instance = instance
+        metric.type = 'status'
+        metric.type_instance = i
+        metric.values = (float(rez[i]),)  # values must be a list or tuple
+#        metric.dispatch()
+
+    #
+    # get "show stat" data
+    #
+    f_ends = {}
+    b_ends = {}
+    b_ends_summ = {}
+    for x in stats.execute('show stat'):
+
+        # skip the CSV header, the builtin stats listener and empty lines
+        if x.startswith("#"):
+            continue
+        if x.startswith("stats"):
+            continue
+        if x == '':
+            continue
+
+        # convert the CSV line into an array
+        tmp = x.split(',')
+
+        # fill frontends
+        if tmp[1] == 'FRONTEND':
+            f_ends[tmp[0]] = tmp
+            continue
+
+        # fill backend summary info
+        if tmp[1] == 'BACKEND':
+            b_ends_summ[tmp[0]] = tmp
+            continue
+
+        # fill backend workers info
+        b_ends["%s@%s" % (tmp[1], tmp[0])] = tmp
+
+    # dispatch the gathered data
+    for z in data_gathered['stat']:
+        # fill info for backend workers
+        for k, v in b_ends.iteritems():
+            # skip empty values
+            if v[data_stat_index[z]]:
+                metric.plugin = PLUGIN_NAME
+                metric.interval = 60
+                metric.plugin_instance = instance
+                metric.type = 'be_workers'
+                metric.type_instance = "%s_%s" % (k, z)
+                metric.values = (float(v[data_stat_index[z]]),)  # values must be a list or tuple
+                metric.dispatch()
+        # fill info for backend summary
+        for k, v in b_ends_summ.iteritems():
+            # skip empty values
+            if v[data_stat_index[z]]:
+                metric.plugin = PLUGIN_NAME
+                metric.interval = 60
+                metric.plugin_instance = instance
+                metric.type = "backend"
+                metric.type_instance = "%s_%s" % (k, z)
+                metric.values = (float(v[data_stat_index[z]]),)  # values must be a list or tuple
+                metric.dispatch()
+        # fill info for frontends
+        for k, v in f_ends.iteritems():
+            # skip empty values
+            if v[data_stat_index[z]]:
+                metric.plugin = PLUGIN_NAME
+                metric.interval = 60
+                metric.plugin_instance = instance
+                metric.type = "frontend"
+                metric.type_instance = "%s_%s" % (k, z)
+                metric.values = (float(v[data_stat_index[z]]),)  # values must be a list or tuple
+                metric.dispatch()
+
+
+# for debugging purposes only; comment this call out when running under collectd
+read_callback()
+
+collectd.register_read(read_callback)
diff --git a/haproxy/haproxy.py b/haproxy/haproxy.py
index bc1e86d..486f588 100644
--- a/haproxy/haproxy.py
+++ b/haproxy/haproxy.py
@@ -42,8 +42,10 @@ def execute(self, command, timeout=200):
 
             for s in r:
                 if (s is client):
-                    buffer = buffer + client.recv(16384)
-                    running = (len(buffer)==0)
+                    tmp = client.recv(16384)
+                    buffer = buffer + tmp
+                    if tmp == '':
+                        running = (len(buffer)==0)
 
         client.close()
diff --git a/tools/__init__.py b/tools/__init__.py
new file mode 100644
index 0000000..633f866
--- /dev/null
+++ b/tools/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+
diff --git a/tools/collectd_mockup.py b/tools/collectd_mockup.py
new file mode 100644
index 0000000..e825ff3
--- /dev/null
+++ b/tools/collectd_mockup.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+
+
+class Values:
+    plugin = ''
+    interval = ''
+    plugin_instance = ''
+    type = ''
+    type_instance = ''
+    values = ''
+
+    def dispatch(self):
+        pass
+        # print "instance/type/type_instance: %s/%s/%s = %s" % (self.plugin_instance, self.type, self.type_instance, self.values)
+        # print ""
+
+
+def info(arg=''):
+    pass
+
+
+def register_read(read_callback):
+    pass
diff --git a/tools/data_list.py b/tools/data_list.py
new file mode 100644
index 0000000..be43882
--- /dev/null
+++ b/tools/data_list.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+data_gathered = {}
+
+# "show info" keys to gather; uncomment the ones you want dispatched
+data_gathered['info'] = [
+#'Name',
+#'Version',
+#'Release_date',
+#'Nbproc',
+#'Process_num',
+#'Pid',
+#'Uptime',
+#'Uptime_sec',
+#'Memmax_MB',
+#'Ulimit-n',
+#'Maxsock',
+#'Maxconn',
+#'Hard_maxconn',
+#'CurrConns',
+#'CumConns',
+#'CumReq',
+#'MaxSslConns',
+#'CurrSslConns',
+#'CumSslConns',
+#'Maxpipes',
+#'PipesUsed',
+#'PipesFree',
+#'ConnRate',
+#'ConnRateLimit',
+#'MaxConnRate',
+#'SessRate',
+#'SessRateLimit',
+#'MaxSessRate',
+#'SslRate',
+#'SslRateLimit',
+#'MaxSslRate',
+#'SslFrontendKeyRate',
+#'SslFrontendMaxKeyRate',
+#'SslFrontendSessionReuse_pct',
+#'SslBackendKeyRate',
+#'SslBackendMaxKeyRate',
+#'SslCacheLookups',
+#'SslCacheMisses',
+#'CompressBpsIn',
+#'CompressBpsOut',
+#'CompressBpsRateLim',
+#'ZlibMemUsage',
+#'MaxZlibMemUsage',
+#'Tasks',
+#'Run_queue',
+#'Idle_pct',
+#'node',
+#'description'
+]
+
+
+# "show stat" columns to gather and dispatch
+data_gathered['stat'] = [
+#'pxname',
+#'svname',
+'qcur',
+'scur',
+'stot',
+'bin',
+'bout',
+'dreq',
+'dresp',
+'ereqeconeresp',
+]
+
+
+# column name -> position in the "show stat" CSV output
+data_stat_index = {
+'pxname':0,
+'svname':1,
+'qcur':2,
+'qmax':3,
+'scur':4,
+'smax':5,
+'slim':6,
+'stot':7,
+'bin':8,
+'bout':9,
+'dreq':10,
+'dresp':11,
+'ereqeconeresp':12,
+'wretr':13,
+'wredis':14,
+'status':15,
+'weight':16,
+'act':17,
+'bckchkfail':18,
+'chkdown':19,
+'lastchg':20,
+'downtime':21,
+'qlimit':22,
+'pid':23,
+'iid':24,
+'sid':25,
+'throttle':26,
+'lbtot':27,
+'tracked':28,
+'type':29,
+'rate':30,
+'rate_lim':31,
+'rate_max':32,
+'check_status':33,
+'check_code':34,
+'check_duration':35,
+'hrsp_1xx':36,
+'hrsp_2xx':37,
+'hrsp_3xx':38,
+'hrsp_4xx':39,
+'hrsp_5xx':40,
+'hrsp_other':41,
+'hanafail':42,
+'req_rate':43,
+'req_rate_max':44,
+'req_tot':45,
+'cli_abrt':46,
+'srv_abrt':47,
+'comp_in':48,
+'comp_out':49,
+'comp_byp':50,
+'comp_rsp':51,
+'lastsess':52,
+'last_chk':53,
+'last_agt':54,
+'qtime':55,
+'ctime':56,
+'rtime':57,
+'ttime':58,
+'agent_status':59,
+'agent_code':60,
+'agent_duration':61,
+'check_desc':62,
+'agent_desc':63,
+'check_rise':64,
+'check_fall':65,
+'check_health':66,
+'agent_rise':67,
+'agent_fall':68,
+'agent_health':69,
+'addr':70,
+'cookie':71,
+'mode':72,
+'algo':73,
+'conn_rate':74,
+'conn_rate_max':75,
+'conn_tot':76,
+'intercepted':77,
+'dcon':78,
+'dses':79
+}