32 changes: 30 additions & 2 deletions README.md
@@ -1,7 +1,35 @@
python-haproxy
python-haproxy collectD plugin
==============

Read HAProxy stats via the stats socket.

Based on python-haproxy: `git clone https://github.com/nl5887/python-haproxy.git`

Before using it in collectd, replace

```import collectd_mockup as collectd```

with

```import collectd```

and comment out the ```read_callback()``` call at the bottom of the plugin.

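To load it under collectd's Python plugin, a minimal `collectd.conf` sketch could look like the following (the `ModulePath` is an assumption — point it at the directory holding `haproxy-plugin.py`; if the hyphen in the module name causes import trouble, rename the file to `haproxy_plugin.py` and adjust the `Import` line):

```
<LoadPlugin python>
  Globals true
</LoadPlugin>

<Plugin python>
  ModulePath "/usr/lib/collectd/python"
  Import "haproxy-plugin"
</Plugin>
```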

Data returned to collectd looks like:
```
instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_2@test_sql_r36/qcur = 0
instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_1@be_test_sql_r37/qcur = 0
instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_1@test_sql_r36/qcur = 0
instance/type/type_instance: master-prod/BW_server_192.168.99.203_25036_2@be_test_sql_r37/qcur = 0
instance/type/type_instance: master-prod/BE_test_sql_r36/qcur = 0
instance/type/type_instance: master-prod/FE_test_r36/qcur =
instance/type/type_instance: master-prod/FE_fe_test_r37/qcur =

```


# Info from python-haproxy

## Install
There are a few different ways you can install python-haproxy:
146 changes: 146 additions & 0 deletions haproxy-plugin.py
@@ -0,0 +1,146 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-


import sys
import os
import commands
import socket
import time

try:
    import collectd
except ImportError:
    import tools.collectd_mockup as collectd

from haproxy import haproxy
from tools.data_list import data_gathered
from tools.data_list import data_stat_index


PLUGIN_NAME = os.path.basename(__file__).split(".")[0]
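# path to the HAProxy admin socket; it must match the "stats socket" line in haproxy.cfg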
stats = haproxy.HAProxyStats('/var/run/haproxy/haproxy.sock')
VERBOSE_LOGGING = True

# convert a list of "key: value" strings into a dict: {key: value}
def str2dict(arg):
    rez = {}
    for x in arg:
        if x:
            # split on the first ':' only, in case the value itself contains colons
            key, val = x.split(':', 1)
            rez[key.strip()] = val.strip()

    return rez
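# illustrative example:
#   str2dict(['Name: HAProxy', 'node: master-prod']) -> {'Name': 'HAProxy', 'node': 'master-prod'}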

def log_verbose(msg):
    if not VERBOSE_LOGGING:
        return
    collectd.info('haproxy plugin [verbose]: %s' % msg)




# callback function for the collectd daemon
def read_callback(data=None):

    metric = collectd.Values()

    #
    # get HAProxy 'show info' data
    #
    rez = str2dict(stats.execute('show info'))
    instance = rez['node'] or socket.gethostname()
    # loop over the keys we need
    for i in data_gathered['info']:

        metric.plugin = PLUGIN_NAME
        metric.interval = 60
        metric.plugin_instance = instance
        metric.type = 'status'
        metric.type_instance = i
        metric.values = (float(rez[i]),)  # values must be a list or tuple
        # metric.dispatch()

    #
    # get 'show stat' data
    #
    f_ends = {}
    b_ends = {}
    b_ends_summ = {}
    for x in stats.execute('show stat'):

        # skip the CSV header, the "stats" frontend and empty lines
        if x.startswith("#"):
            continue
        if x.startswith("stats"):
            continue
        if x == '':
            continue

        # split the CSV line into a list of fields
        tmp = x.split(',')

        # fill frontends
        if tmp[1] == 'FRONTEND':
            f_ends[tmp[0]] = tmp
            continue

        # fill backend summary info
        if tmp[1] == 'BACKEND':
            b_ends_summ[tmp[0]] = tmp
            continue

        # fill backend workers info
        b_ends["%s@%s" % (tmp[1], tmp[0])] = tmp




    # dispatch the selected 'show stat' fields
    for z in data_gathered['stat']:
        # per-server backend (worker) rows
        for k, v in b_ends.iteritems():
            # skip empty values
            if v[data_stat_index[z]]:
                metric.plugin = PLUGIN_NAME
                metric.interval = 60

                metric.plugin_instance = instance
                metric.type = 'be_workers'
                metric.type_instance = "%s_%s" % (k, z)

                metric.values = (v[data_stat_index[z]],)  # values must be a list or tuple
                metric.dispatch()
        # backend summary rows
        for k, v in b_ends_summ.iteritems():
            # skip empty values
            if v[data_stat_index[z]]:
                metric.plugin = PLUGIN_NAME
                metric.interval = 60

                metric.plugin_instance = instance
                metric.type = "backend"
                metric.type_instance = "%s_%s" % (k, z)
                metric.values = (v[data_stat_index[z]],)  # values must be a list or tuple
                metric.dispatch()
        # frontend rows
        for k, v in f_ends.iteritems():
            # skip empty values
            if v[data_stat_index[z]]:
                metric.plugin = PLUGIN_NAME
                metric.interval = 60

                metric.plugin_instance = instance
                metric.type = "frontend"
                metric.type_instance = "%s_%s" % (k, z)

                metric.values = (v[data_stat_index[z]],)  # values must be a list or tuple
                metric.dispatch()




# for debugging purposes; comment out the next line when running under collectd
read_callback()

collectd.register_read(read_callback)

6 changes: 4 additions & 2 deletions haproxy/haproxy.py
@@ -42,8 +42,10 @@ def execute(self, command, timeout=200):

 for s in r:
     if (s is client):
-        buffer = buffer + client.recv(16384)
-        running = (len(buffer)==0)
+        tmp = client.recv(16384)
+        buffer = buffer + tmp
+        if tmp == '':
+            running = (len(buffer)==0)

 client.close()

2 changes: 2 additions & 0 deletions tools/__init__.py
@@ -0,0 +1,2 @@
# -*- coding: utf-8 -*-

23 changes: 23 additions & 0 deletions tools/collectd_mockup.py
@@ -0,0 +1,23 @@
#!/usr/bin/python


class Values:
    plugin = ''
    interval = ''
    plugin_instance = ''
    type = ''
    type_instance = ''
    values = ''

    def dispatch(self):
        pass
        #print "instance/type/type_instance: %s/%s/%s = %s" % (self.plugin_instance, self.type, self.type_instance, self.values)
        # print ""


# stand-in for collectd.info(); module-level so that collectd.info(msg) works
def info(msg=''):
    pass


def register_read(read_callback):
    pass

152 changes: 152 additions & 0 deletions tools/data_list.py
@@ -0,0 +1,152 @@
#!/usr/bin/python

data_gathered = {}

data_gathered['info'] = [
#'Name',
#'Version',
#'Release_date',
#'Nbproc',
#'Process_num',
#'Pid',
#'Uptime',
#'Uptime_sec',
#'Memmax_MB',
#'Ulimit-n',
#'Maxsock',
#'Maxconn',
#'Hard_maxconn',
#'CurrConns',
#'CumConns',
#'CumReq',
#'MaxSslConns',
#'CurrSslConns',
#'CumSslConns',
#'Maxpipes',
#'PipesUsed',
#'PipesFree',
#'ConnRate',
#'ConnRateLimit',
#'MaxConnRate',
#'SessRate',
#'SessRateLimit',
#'MaxSessRate',
#'SslRate',
#'SslRateLimit',
#'MaxSslRate',
#'SslFrontendKeyRate',
#'SslFrontendMaxKeyRate',
#'SslFrontendSessionReuse_pct',
#'SslBackendKeyRate',
#'SslBackendMaxKeyRate',
#'SslCacheLookups',
#'SslCacheMisses',
#'CompressBpsIn',
#'CompressBpsOut',
#'CompressBpsRateLim',
#'ZlibMemUsage',
#'MaxZlibMemUsage',
#'Tasks',
#'Run_queue',
#'Idle_pct',
#'node',
#'description'
]


data_gathered['stat'] = [
#'pxname',
#'svname',
'qcur',
'scur',
'stot',
'bin',
'bout',
'dreq',
'dresp',
'ereq',
'econ',
'eresp',
]


# column index of each field in a 'show stat' CSV line,
# in the order documented in the HAProxy management guide ("CSV format")
data_stat_index = {
'pxname':0,
'svname':1,
'qcur':2,
'qmax':3,
'scur':4,
'smax':5,
'slim':6,
'stot':7,
'bin':8,
'bout':9,
'dreq':10,
'dresp':11,
'ereq':12,
'econ':13,
'eresp':14,
'wretr':15,
'wredis':16,
'status':17,
'weight':18,
'act':19,
'bck':20,
'chkfail':21,
'chkdown':22,
'lastchg':23,
'downtime':24,
'qlimit':25,
'pid':26,
'iid':27,
'sid':28,
'throttle':29,
'lbtot':30,
'tracked':31,
'type':32,
'rate':33,
'rate_lim':34,
'rate_max':35,
'check_status':36,
'check_code':37,
'check_duration':38,
'hrsp_1xx':39,
'hrsp_2xx':40,
'hrsp_3xx':41,
'hrsp_4xx':42,
'hrsp_5xx':43,
'hrsp_other':44,
'hanafail':45,
'req_rate':46,
'req_rate_max':47,
'req_tot':48,
'cli_abrt':49,
'srv_abrt':50,
'comp_in':51,
'comp_out':52,
'comp_byp':53,
'comp_rsp':54,
'lastsess':55,
'last_chk':56,
'last_agt':57,
'qtime':58,
'ctime':59,
'rtime':60,
'ttime':61,
'agent_status':62,
'agent_code':63,
'agent_duration':64,
'check_desc':65,
'agent_desc':66,
'check_rise':67,
'check_fall':68,
'check_health':69,
'agent_rise':70,
'agent_fall':71,
'agent_health':72,
'addr':73,
'cookie':74,
'mode':75,
'algo':76,
'conn_rate':77,
'conn_rate_max':78,
'conn_tot':79,
'intercepted':80,
'dcon':81,
'dses':82
}