mirror of
https://github.com/netdata/netdata.git
synced 2025-04-17 11:12:42 +00:00
[python] make units compliant with IEC standard (#4995)
* apache units fix * beanstalk * bind_rndc * boinc * ceph * chrony * couchdb * dns_query * dnsdist * dockerd * dovecot * elasticsearch by @vlvkobal <3 * example * exim * fail2ban * freeradius minor fixes * freeradius minor fixes * freeradius minor fixes * go_expvar * haproxy * hddtemp * httpcheck * icecast * ipfs * isc_dhcpd * litespeed * logind * megacli * memcached * mongodb * monit * mysql * nginx * nginx_plus * nsd * ntpd * nvidia_smi * openldap * ovpn_status * phpfpm * portcheck * postfix * postgres * powerdns * proxysql * puppet * rabbitmq * redis * retroshare * samba * sensors * smartd_log * spigotmc * springboot * squid * retroshare * tomcat * retroshare * tor * traefik * traefik * unbound * uwsgi * varnish * w1sensor * web_log * ok codacy * retroshare * ipfs
This commit is contained in:
parent
7ad91b8f9a
commit
97b32703c6
62 changed files with 747 additions and 580 deletions
collectors/python.d.plugin
apache
beanstalk
bind_rndc
boinc
ceph
chrony
couchdb
dns_query_time
dnsdist
dockerd
dovecot
elasticsearch
example
exim
fail2ban
freeradius
go_expvar
haproxy
hddtemp
httpcheck
icecast
ipfs
isc_dhcpd
litespeed
logind
megacli
memcached
mongodb
monit
mysql
nginx
nginx_plus
nsd
ntpd
nvidia_smi
openldap
ovpn_status_log
phpfpm
portcheck
postfix
postgres
powerdns
proxysql
puppet
rabbitmq
redis
rethinkdbs
retroshare
samba
sensors
smartd_log
spigotmc
springboot
squid
tomcat
tor
traefik
unbound
uwsgi
varnish
w1sensor
web_log
|
@ -5,63 +5,60 @@
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# default job configuration (overridden by python.d.plugin)
|
||||
# config = {'local': {
|
||||
# 'update_every': update_every,
|
||||
# 'retries': retries,
|
||||
# 'priority': priority,
|
||||
# 'url': 'http://www.apache.org/server-status?auto'
|
||||
# }}
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
|
||||
ORDER = [
|
||||
'requests',
|
||||
'connections',
|
||||
'conns_async',
|
||||
'net',
|
||||
'workers',
|
||||
'reqpersec',
|
||||
'bytespersec',
|
||||
'bytesperreq',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'bytesperreq': {
|
||||
'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
|
||||
'options': [None, 'Lifetime Avg. Request Size', 'KiB',
|
||||
'statistics', 'apache.bytesperreq', 'area'],
|
||||
'lines': [
|
||||
['size_req']
|
||||
['size_req', 'size', 'absolute', 1, 1024 * 100000]
|
||||
]},
|
||||
'workers': {
|
||||
'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
|
||||
'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
|
||||
'lines': [
|
||||
['idle'],
|
||||
['busy'],
|
||||
]},
|
||||
'reqpersec': {
|
||||
'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
|
||||
'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
|
||||
'apache.reqpersec', 'area'],
|
||||
'lines': [
|
||||
['requests_sec']
|
||||
['requests_sec', 'requests', 'absolute', 1, 100000]
|
||||
]},
|
||||
'bytespersec': {
|
||||
'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
|
||||
'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
|
||||
'apache.bytesperreq', 'area'],
|
||||
'lines': [
|
||||
['size_sec', None, 'absolute', 8, 1000]
|
||||
['size_sec', None, 'absolute', 8, 1000 * 100000]
|
||||
]},
|
||||
'requests': {
|
||||
'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
|
||||
'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
|
||||
'lines': [
|
||||
['requests', None, 'incremental']
|
||||
]},
|
||||
'net': {
|
||||
'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
|
||||
'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
|
||||
'lines': [
|
||||
['sent', None, 'incremental', 8, 1]
|
||||
]},
|
||||
'connections': {
|
||||
'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
|
||||
'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
|
||||
'lines': [
|
||||
['connections']
|
||||
]},
|
||||
'conns_async': {
|
||||
'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
|
||||
'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
|
||||
'lines': [
|
||||
['keepalive'],
|
||||
['closing'],
|
||||
|
@ -85,6 +82,14 @@ ASSIGNMENT = {
|
|||
'ConnsAsyncWriting': 'writing'
|
||||
}
|
||||
|
||||
FLOAT_VALUES = [
|
||||
'BytesPerReq',
|
||||
'ReqPerSec',
|
||||
'BytesPerSec',
|
||||
]
|
||||
|
||||
LIGHTTPD_MARKER = 'idle_servers'
|
||||
|
||||
|
||||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
|
@ -95,20 +100,15 @@ class Service(UrlService):
|
|||
|
||||
def check(self):
|
||||
self._manager = self._build_manager()
|
||||
|
||||
data = self._get_data()
|
||||
|
||||
if not data:
|
||||
return None
|
||||
|
||||
if 'idle_servers' in data:
|
||||
self.module_name = 'lighttpd'
|
||||
for chart in self.definitions:
|
||||
if chart == 'workers':
|
||||
lines = self.definitions[chart]['lines']
|
||||
lines[0] = ['idle_servers', 'idle']
|
||||
lines[1] = ['busy_servers', 'busy']
|
||||
opts = self.definitions[chart]['options']
|
||||
opts[1] = opts[1].replace('apache', 'lighttpd')
|
||||
opts[4] = opts[4].replace('apache', 'lighttpd')
|
||||
if LIGHTTPD_MARKER in data:
|
||||
self.turn_into_lighttpd()
|
||||
|
||||
return True
|
||||
|
||||
def _get_data(self):
|
||||
|
@ -117,15 +117,44 @@ class Service(UrlService):
|
|||
:return: dict
|
||||
"""
|
||||
raw_data = self._get_raw_data()
|
||||
|
||||
if not raw_data:
|
||||
return None
|
||||
|
||||
data = dict()
|
||||
|
||||
for row in raw_data.split('\n'):
|
||||
tmp = row.split(':')
|
||||
if tmp[0] in ASSIGNMENT:
|
||||
try:
|
||||
data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
|
||||
except (IndexError, ValueError):
|
||||
continue
|
||||
for line in raw_data.split('\n'):
|
||||
try:
|
||||
parse_line(line, data)
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
return data or None
|
||||
|
||||
def turn_into_lighttpd(self):
    """Re-label this job's charts for lighttpd instead of apache.

    Called when the status endpoint reports lighttpd-style worker
    fields; renames the module and rewires the 'workers' chart
    dimensions to the lighttpd counter names.
    """
    self.module_name = 'lighttpd'
    if 'workers' in self.definitions:
        chart = self.definitions['workers']
        # lighttpd exposes idle_servers/busy_servers instead of idle/busy
        chart['lines'][0] = ['idle_servers', 'idle']
        chart['lines'][1] = ['busy_servers', 'busy']
        options = chart['options']
        # options[1] is the chart title, options[4] the context id
        options[1] = options[1].replace('apache', 'lighttpd')
        options[4] = options[4].replace('apache', 'lighttpd')
|
||||
|
||||
|
||||
def parse_line(line, data):
    """Parse one 'Key: value' status line into the data dict.

    Lines that do not split into exactly two fields, or whose key is
    not listed in ASSIGNMENT, are silently skipped.  Keys listed in
    FLOAT_VALUES are scaled by 100000 and stored as integers (the
    matching chart divisors undo the scaling).

    :param line: raw status line, e.g. 'BytesPerReq: 3134.8'
    :param data: dict to receive the parsed metric
    :raises ValueError: when the value field is not numeric
    """
    fields = line.split(':')
    if len(fields) != 2:
        return

    key, raw_value = fields

    if key not in ASSIGNMENT:
        return

    if key in FLOAT_VALUES:
        data[ASSIGNMENT[key]] = int(float(raw_value) * 100000)
    else:
        data[ASSIGNMENT[key]] = int(raw_value)
|
||||
|
|
|
@ -12,12 +12,18 @@ except ImportError:
|
|||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
from bases.loaders import safe_load
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
|
||||
'current_connections', 'binlog', 'uptime']
|
||||
ORDER = [
|
||||
'cpu_usage',
|
||||
'jobs_rate',
|
||||
'connections_rate',
|
||||
'commands_rate',
|
||||
'current_tubes',
|
||||
'current_jobs',
|
||||
'current_connections',
|
||||
'binlog',
|
||||
'uptime',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'cpu_usage': {
|
||||
|
|
|
@ -11,10 +11,15 @@ from subprocess import Popen
|
|||
from bases.collection import find_binary
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
priority = 60000
|
||||
|
||||
update_every = 30
|
||||
|
||||
ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
|
||||
ORDER = [
|
||||
'name_server_statistics',
|
||||
'incoming_queries',
|
||||
'outgoing_queries',
|
||||
'named_stats_size',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'name_server_statistics': {
|
||||
|
@ -43,7 +48,7 @@ CHARTS = {
|
|||
'lines': [
|
||||
]},
|
||||
'named_stats_size': {
|
||||
'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
|
||||
'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
|
||||
'lines': [
|
||||
['stats_size', None, 'absolute', 1, 1 << 20]
|
||||
]
|
||||
|
@ -91,10 +96,20 @@ class Service(SimpleService):
|
|||
self.definitions = CHARTS
|
||||
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
|
||||
self.rndc = find_binary('rndc')
|
||||
self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
|
||||
nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
|
||||
nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
|
||||
nms_dropped_queries=0)
|
||||
self.data = dict(
|
||||
nms_requests=0,
|
||||
nms_responses=0,
|
||||
nms_failure=0,
|
||||
nms_auth=0,
|
||||
nms_non_auth=0,
|
||||
nms_nxrrset=0,
|
||||
nms_success=0,
|
||||
nms_nxdomain=0,
|
||||
nms_recursion=0,
|
||||
nms_duplicate=0,
|
||||
nms_rejected_queries=0,
|
||||
nms_dropped_queries=0,
|
||||
)
|
||||
|
||||
def check(self):
|
||||
if not self.rndc:
|
||||
|
|
|
@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService
|
|||
from third_party import boinc_client
|
||||
|
||||
|
||||
ORDER = ['tasks', 'states', 'sched_states', 'process_states']
|
||||
ORDER = [
|
||||
'tasks',
|
||||
'states',
|
||||
'sched_states',
|
||||
'process_states',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'tasks': {
|
||||
|
@ -141,14 +146,16 @@ class Service(SimpleService):
|
|||
def _get_data(self):
|
||||
if not self.is_alive():
|
||||
return None
|
||||
|
||||
data = dict(_DATA_TEMPLATE)
|
||||
results = []
|
||||
|
||||
try:
|
||||
results = self.client.get_tasks()
|
||||
except socket.error:
|
||||
self.error('Connection is dead')
|
||||
self.alive = False
|
||||
return None
|
||||
|
||||
for task in results:
|
||||
data['total'] += 1
|
||||
data[_TASK_MAP[task.state]] += 1
|
||||
|
@ -159,4 +166,5 @@ class Service(SimpleService):
|
|||
data[_PROC_MAP[task.active_task_state]] += 1
|
||||
except AttributeError:
|
||||
pass
|
||||
return data
|
||||
|
||||
return data or None
|
||||
|
|
|
@ -9,13 +9,13 @@ try:
|
|||
except ImportError:
|
||||
CEPH = False
|
||||
|
||||
import os
|
||||
import json
|
||||
import os
|
||||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 10
|
||||
priority = 60000
|
||||
|
||||
ORDER = [
|
||||
'general_usage',
|
||||
|
@ -36,7 +36,7 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'general_usage': {
|
||||
'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
|
||||
'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
|
||||
'lines': [
|
||||
['general_available', 'avail', 'absolute'],
|
||||
['general_usage', 'used', 'absolute']
|
||||
|
@ -49,7 +49,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'general_bytes': {
|
||||
'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
|
||||
'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
|
||||
'area'],
|
||||
'lines': [
|
||||
['general_read_bytes', 'read', 'absolute', 1, 1024],
|
||||
|
@ -73,7 +73,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'pool_usage': {
|
||||
'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
|
||||
'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'pool_objects': {
|
||||
|
@ -81,11 +81,11 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'pool_read_bytes': {
|
||||
'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
|
||||
'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
|
||||
'lines': []
|
||||
},
|
||||
'pool_write_bytes': {
|
||||
'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
|
||||
'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
|
||||
'lines': []
|
||||
},
|
||||
'pool_read_operations': {
|
||||
|
@ -97,7 +97,7 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'osd_usage': {
|
||||
'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
|
||||
'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'osd_apply_latency': {
|
||||
|
|
|
@ -7,10 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
|
|||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 5
|
||||
priority = 60000
|
||||
|
||||
CHRONY_COMMAND = 'chronyc -n tracking'
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
|
||||
ORDER = [
|
||||
'system',
|
||||
'offsets',
|
||||
'stratum',
|
||||
'root',
|
||||
'frequency',
|
||||
'residualfreq',
|
||||
'skew',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'system': {
|
||||
|
@ -76,9 +85,9 @@ class Service(ExecutableService):
|
|||
def __init__(self, configuration=None, name=None):
|
||||
ExecutableService.__init__(
|
||||
self, configuration=configuration, name=name)
|
||||
self.command = 'chronyc -n tracking'
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.command = CHRONY_COMMAND
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict
|
|||
from json import loads
|
||||
from threading import Thread
|
||||
from socket import gethostbyname, gaierror
|
||||
|
||||
try:
|
||||
from queue import Queue
|
||||
except ImportError:
|
||||
|
@ -15,9 +16,9 @@ except ImportError:
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
|
||||
update_every = 1
|
||||
priority = 60000
|
||||
|
||||
|
||||
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
|
||||
|
||||
|
@ -108,7 +109,7 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'activity': {
|
||||
'options': [None, 'Overall Activity', 'req/s',
|
||||
'options': [None, 'Overall Activity', 'requests/s',
|
||||
'dbactivity', 'couchdb.activity', 'stacked'],
|
||||
'lines': [
|
||||
['couchdb_database_reads', 'DB reads', 'incremental'],
|
||||
|
@ -117,7 +118,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'request_methods': {
|
||||
'options': [None, 'HTTP request methods', 'req/s',
|
||||
'options': [None, 'HTTP request methods', 'requests/s',
|
||||
'httptraffic', 'couchdb.request_methods',
|
||||
'stacked'],
|
||||
'lines': [
|
||||
|
@ -132,7 +133,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'response_codes': {
|
||||
'options': [None, 'HTTP response status codes', 'resp/s',
|
||||
'options': [None, 'HTTP response status codes', 'responses/s',
|
||||
'httptraffic', 'couchdb.response_codes',
|
||||
'stacked'],
|
||||
'lines': [
|
||||
|
@ -150,15 +151,13 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'open_files': {
|
||||
'options': [None, 'Open files', 'files',
|
||||
'ops', 'couchdb.open_files', 'line'],
|
||||
'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
|
||||
'lines': [
|
||||
['couchdb_open_os_files', '# files', 'absolute']
|
||||
]
|
||||
},
|
||||
'active_tasks': {
|
||||
'options': [None, 'Active task breakdown', 'tasks',
|
||||
'ops', 'couchdb.active_tasks', 'stacked'],
|
||||
'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
|
||||
'lines': [
|
||||
['activetasks_indexer', 'Indexer', 'absolute'],
|
||||
['activetasks_database_compaction', 'DB Compaction', 'absolute'],
|
||||
|
@ -167,8 +166,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'replicator_jobs': {
|
||||
'options': [None, 'Replicator job breakdown', 'jobs',
|
||||
'ops', 'couchdb.replicator_jobs', 'stacked'],
|
||||
'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
|
||||
'lines': [
|
||||
['couch_replicator_jobs_running', 'Running', 'absolute'],
|
||||
['couch_replicator_jobs_pending', 'Pending', 'absolute'],
|
||||
|
@ -178,8 +176,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'erlang_memory': {
|
||||
'options': [None, 'Erlang VM memory usage', 'bytes',
|
||||
'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
|
||||
'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
|
||||
'lines': [
|
||||
['memory_atom', 'atom', 'absolute'],
|
||||
['memory_binary', 'binaries', 'absolute'],
|
||||
|
@ -190,23 +187,20 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'erlang_reductions': {
|
||||
'options': [None, 'Erlang reductions', 'count',
|
||||
'erlang', 'couchdb.reductions', 'line'],
|
||||
'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
|
||||
'lines': [
|
||||
['reductions', 'reductions', 'incremental']
|
||||
]
|
||||
},
|
||||
'erlang_proc_counts': {
|
||||
'options': [None, 'Process counts', 'count',
|
||||
'erlang', 'couchdb.proccounts', 'line'],
|
||||
'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
|
||||
'lines': [
|
||||
['os_proc_count', 'OS procs', 'absolute'],
|
||||
['process_count', 'erl procs', 'absolute']
|
||||
]
|
||||
},
|
||||
'erlang_peak_msg_queue': {
|
||||
'options': [None, 'Peak message queue size', 'count',
|
||||
'erlang', 'couchdb.peakmsgqueue',
|
||||
'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
|
||||
'line'],
|
||||
'lines': [
|
||||
['peak_msg_queue', 'peak size', 'absolute']
|
||||
|
@ -214,18 +208,15 @@ CHARTS = {
|
|||
},
|
||||
# Lines for the following are added as part of check()
|
||||
'db_sizes_file': {
|
||||
'options': [None, 'Database sizes (file)', 'KB',
|
||||
'perdbstats', 'couchdb.db_sizes_file', 'line'],
|
||||
'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'db_sizes_external': {
|
||||
'options': [None, 'Database sizes (external)', 'KB',
|
||||
'perdbstats', 'couchdb.db_sizes_external', 'line'],
|
||||
'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'db_sizes_active': {
|
||||
'options': [None, 'Database sizes (active)', 'KB',
|
||||
'perdbstats', 'couchdb.db_sizes_active', 'line'],
|
||||
'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'db_doc_counts': {
|
||||
|
@ -234,8 +225,7 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'db_doc_del_counts': {
|
||||
'options': [None, 'Database # of deleted docs', 'docs',
|
||||
'perdbstats', 'couchdb_db_doc_del_count', 'line'],
|
||||
'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
|
||||
'lines': []
|
||||
}
|
||||
}
|
||||
|
@ -255,7 +245,7 @@ class Service(UrlService):
|
|||
try:
|
||||
self.dbs = self.configuration.get('databases').split(' ')
|
||||
except (KeyError, AttributeError):
|
||||
self.dbs = []
|
||||
self.dbs = list()
|
||||
|
||||
def check(self):
|
||||
if not (self.host and self.port):
|
||||
|
|
|
@ -28,10 +28,7 @@ except ImportError:
|
|||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 5
|
||||
priority = 60000
|
||||
retries = 60
|
||||
|
||||
|
||||
class Service(SimpleService):
|
||||
|
@ -46,14 +43,14 @@ class Service(SimpleService):
|
|||
|
||||
def check(self):
|
||||
if not DNS_PYTHON:
|
||||
self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
|
||||
self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
|
||||
return False
|
||||
|
||||
self.timeout = self.timeout if isinstance(self.timeout, int) else 4
|
||||
|
||||
if not all([self.domains, self.server_list,
|
||||
isinstance(self.server_list, str), isinstance(self.domains, str)]):
|
||||
self.error('server_list and domain_list can\'t be empty')
|
||||
self.error("server_list and domain_list can't be empty")
|
||||
return False
|
||||
else:
|
||||
self.domains, self.server_list = self.domains.split(), self.server_list.split()
|
||||
|
@ -129,17 +126,27 @@ def create_charts(aggregate, server_list):
|
|||
}
|
||||
}
|
||||
for ns in server_list:
|
||||
definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
|
||||
dim = [
|
||||
'_'.join(['ns', ns.replace('.', '_')]),
|
||||
ns,
|
||||
'absolute',
|
||||
]
|
||||
definitions['dns_group']['lines'].append(dim)
|
||||
|
||||
return order, definitions
|
||||
else:
|
||||
order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
|
||||
definitions = dict()
|
||||
|
||||
for ns in server_list:
|
||||
definitions[''.join(['dns_', ns.replace('.', '_')])] = {
|
||||
'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
|
||||
'lines': [
|
||||
['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
|
||||
[
|
||||
'_'.join(['ns', ns.replace('.', '_')]),
|
||||
ns,
|
||||
'absolute',
|
||||
]
|
||||
]
|
||||
}
|
||||
return order, definitions
|
||||
|
|
|
@ -90,9 +90,9 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'servermem': {
|
||||
'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
|
||||
'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
|
||||
'lines': [
|
||||
['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
|
||||
['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
|
||||
]
|
||||
},
|
||||
'query_latency': {
|
||||
|
|
|
@ -23,21 +23,21 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'running_containers': {
|
||||
'options': [None, 'Number of running containers', 'running containers', 'running containers',
|
||||
'options': [None, 'Number of running containers', 'containers', 'running containers',
|
||||
'docker.running_containers', 'line'],
|
||||
'lines': [
|
||||
['running_containers', 'running']
|
||||
]
|
||||
},
|
||||
'healthy_containers': {
|
||||
'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
|
||||
'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
|
||||
'docker.healthy_containers', 'line'],
|
||||
'lines': [
|
||||
['healthy_containers', 'healthy']
|
||||
]
|
||||
},
|
||||
'unhealthy_containers': {
|
||||
'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
|
||||
'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
|
||||
'docker.unhealthy_containers', 'line'],
|
||||
'lines': [
|
||||
['unhealthy_containers', 'unhealthy']
|
||||
|
@ -51,10 +51,11 @@ class Service(SimpleService):
|
|||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.client = None
|
||||
|
||||
def check(self):
|
||||
if not HAS_DOCKER:
|
||||
self.error('\'docker\' package is needed to use docker.chart.py')
|
||||
self.error("'docker' package is needed to use docker.chart.py")
|
||||
return False
|
||||
|
||||
self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
|
||||
|
@ -69,6 +70,7 @@ class Service(SimpleService):
|
|||
|
||||
def get_data(self):
|
||||
data = dict()
|
||||
|
||||
data['running_containers'] = len(self.client.containers.list(sparse=True))
|
||||
data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
|
||||
data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
|
||||
|
|
|
@ -5,11 +5,10 @@
|
|||
|
||||
from bases.FrameworkServices.SocketService import SocketService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
UNIX_SOCKET = '/var/run/dovecot/stats'
|
||||
|
||||
|
||||
ORDER = [
|
||||
'sessions',
|
||||
'logins',
|
||||
|
@ -52,14 +51,14 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'context_switches': {
|
||||
'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'],
|
||||
'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
|
||||
'lines': [
|
||||
['vol_cs', 'voluntary', 'absolute'],
|
||||
['invol_cs', 'involuntary', 'absolute']
|
||||
]
|
||||
},
|
||||
'io': {
|
||||
'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'],
|
||||
'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
|
||||
'lines': [
|
||||
['disk_input', 'read', 'incremental', 1, 1024],
|
||||
['disk_output', 'write', 'incremental', -1, 1024]
|
||||
|
@ -68,8 +67,8 @@ CHARTS = {
|
|||
'net': {
|
||||
'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
|
||||
'lines': [
|
||||
['read_bytes', 'read', 'incremental', 8, 1024],
|
||||
['write_bytes', 'write', 'incremental', -8, 1024]
|
||||
['read_bytes', 'read', 'incremental', 8, 1000],
|
||||
['write_bytes', 'write', 'incremental', -8, 1000]
|
||||
]
|
||||
},
|
||||
'syscalls': {
|
||||
|
@ -112,13 +111,12 @@ CHARTS = {
|
|||
class Service(SocketService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
SocketService.__init__(self, configuration=configuration, name=name)
|
||||
self.request = 'EXPORT\tglobal\r\n'
|
||||
self.host = None # localhost
|
||||
self.port = None # 24242
|
||||
# self._keep_alive = True
|
||||
self.unix_socket = '/var/run/dovecot/stats'
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.host = None # localhost
|
||||
self.port = None # 24242
|
||||
self.unix_socket = UNIX_SOCKET
|
||||
self.request = 'EXPORT\tglobal\r\n'
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -159,17 +159,20 @@ ORDER = [
|
|||
'fielddata_evictions_tripped',
|
||||
'cluster_health_status',
|
||||
'cluster_health_nodes',
|
||||
'cluster_health_pending_tasks',
|
||||
'cluster_health_flight_fetch',
|
||||
'cluster_health_shards',
|
||||
'cluster_stats_nodes',
|
||||
'cluster_stats_query_cache',
|
||||
'cluster_stats_docs',
|
||||
'cluster_stats_store',
|
||||
'cluster_stats_indices_shards',
|
||||
'cluster_stats_indices',
|
||||
'cluster_stats_shards_total',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'search_performance_total': {
|
||||
'options': [None, 'Queries And Fetches', 'number of', 'search performance',
|
||||
'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
|
||||
'elastic.search_performance_total', 'stacked'],
|
||||
'lines': [
|
||||
['indices_search_query_total', 'queries', 'incremental'],
|
||||
|
@ -177,7 +180,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'search_performance_current': {
|
||||
'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance',
|
||||
'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
|
||||
'elastic.search_performance_current', 'stacked'],
|
||||
'lines': [
|
||||
['indices_search_query_current', 'queries', 'absolute'],
|
||||
|
@ -193,14 +196,14 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'search_latency': {
|
||||
'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'],
|
||||
'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency', 'stacked'],
|
||||
'lines': [
|
||||
['query_latency', 'query', 'absolute', 1, 1000],
|
||||
['fetch_latency', 'fetch', 'absolute', 1, 1000]
|
||||
]
|
||||
},
|
||||
'index_performance_total': {
|
||||
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of',
|
||||
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
|
||||
'indexing performance', 'elastic.index_performance_total', 'stacked'],
|
||||
'lines': [
|
||||
['indices_indexing_index_total', 'indexed', 'incremental'],
|
||||
|
@ -225,7 +228,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'index_latency': {
|
||||
'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance',
|
||||
'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
|
||||
'elastic.index_latency', 'stacked'],
|
||||
'lines': [
|
||||
['indexing_latency', 'indexing', 'absolute', 1, 1000],
|
||||
|
@ -233,7 +236,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'index_translog_operations': {
|
||||
'options': [None, 'Translog Operations', 'count', 'translog',
|
||||
'options': [None, 'Translog Operations', 'operations', 'translog',
|
||||
'elastic.index_translog_operations', 'area'],
|
||||
'lines': [
|
||||
['indices_translog_operations', 'total', 'absolute'],
|
||||
|
@ -241,7 +244,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'index_translog_size': {
|
||||
'options': [None, 'Translog Size', 'MB', 'translog',
|
||||
'options': [None, 'Translog Size', 'MiB', 'translog',
|
||||
'elastic.index_translog_size', 'area'],
|
||||
'lines': [
|
||||
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567],
|
||||
|
@ -249,21 +252,21 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'index_segments_count': {
|
||||
'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments',
|
||||
'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
|
||||
'elastic.index_segments_count', 'line'],
|
||||
'lines': [
|
||||
['indices_segments_count', 'segments', 'absolute']
|
||||
]
|
||||
},
|
||||
'index_segments_memory_writer': {
|
||||
'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments',
|
||||
'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
|
||||
'elastic.index_segments_memory_writer', 'area'],
|
||||
'lines': [
|
||||
['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048567]
|
||||
]
|
||||
},
|
||||
'index_segments_memory': {
|
||||
'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments',
|
||||
'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
|
||||
'elastic.index_segments_memory', 'stacked'],
|
||||
'lines': [
|
||||
['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048567],
|
||||
|
@ -277,14 +280,14 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'jvm_mem_heap': {
|
||||
'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc',
|
||||
'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
|
||||
'elastic.jvm_heap', 'area'],
|
||||
'lines': [
|
||||
['jvm_mem_heap_used_percent', 'inuse', 'absolute']
|
||||
]
|
||||
},
|
||||
'jvm_mem_heap_bytes': {
|
||||
'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc',
|
||||
'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
|
||||
'elastic.jvm_heap_bytes', 'area'],
|
||||
'lines': [
|
||||
['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576],
|
||||
|
@ -292,7 +295,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'jvm_buffer_pool_count': {
|
||||
'options': [None, 'JVM Buffers', 'count', 'memory usage and gc',
|
||||
'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
|
||||
'elastic.jvm_buffer_pool_count', 'line'],
|
||||
'lines': [
|
||||
['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
|
||||
|
@ -300,7 +303,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'jvm_direct_buffers_memory': {
|
||||
'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc',
|
||||
'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
|
||||
'elastic.jvm_direct_buffers_memory', 'area'],
|
||||
'lines': [
|
||||
['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048567],
|
||||
|
@ -308,7 +311,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'jvm_mapped_buffers_memory': {
|
||||
'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc',
|
||||
'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
|
||||
'elastic.jvm_mapped_buffers_memory', 'area'],
|
||||
'lines': [
|
||||
['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048567],
|
||||
|
@ -316,14 +319,14 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'jvm_gc_count': {
|
||||
'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
|
||||
'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
|
||||
'lines': [
|
||||
['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
|
||||
['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
|
||||
]
|
||||
},
|
||||
'jvm_gc_time': {
|
||||
'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc',
|
||||
'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
|
||||
'elastic.gc_time', 'stacked'],
|
||||
'lines': [
|
||||
['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
|
||||
|
@ -353,13 +356,13 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'fielddata_cache': {
|
||||
'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
|
||||
'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
|
||||
'lines': [
|
||||
['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
|
||||
]
|
||||
},
|
||||
'fielddata_evictions_tripped': {
|
||||
'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events',
|
||||
'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
|
||||
'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
|
||||
'lines': [
|
||||
['indices_fielddata_evictions', 'evictions', 'incremental'],
|
||||
|
@ -367,12 +370,24 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'cluster_health_nodes': {
|
||||
'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API',
|
||||
'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
|
||||
'elastic.cluster_health_nodes', 'stacked'],
|
||||
'lines': [
|
||||
['number_of_nodes', 'nodes', 'absolute'],
|
||||
['number_of_data_nodes', 'data_nodes', 'absolute'],
|
||||
]
|
||||
},
|
||||
'cluster_health_pending_tasks': {
|
||||
'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
|
||||
'elastic.cluster_health_pending_tasks', 'line'],
|
||||
'lines': [
|
||||
['number_of_pending_tasks', 'pending_tasks', 'absolute'],
|
||||
]
|
||||
},
|
||||
'cluster_health_flight_fetch': {
|
||||
'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
|
||||
'elastic.cluster_health_flight_fetch', 'line'],
|
||||
'lines': [
|
||||
['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
|
||||
]
|
||||
},
|
||||
|
@ -420,24 +435,30 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'cluster_stats_docs': {
|
||||
'options': [None, 'Docs Statistics', 'count', 'cluster stats API',
|
||||
'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
|
||||
'elastic.cluster_docs', 'line'],
|
||||
'lines': [
|
||||
['indices_docs_count', 'docs', 'absolute']
|
||||
]
|
||||
},
|
||||
'cluster_stats_store': {
|
||||
'options': [None, 'Store Statistics', 'MB', 'cluster stats API',
|
||||
'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
|
||||
'elastic.cluster_store', 'line'],
|
||||
'lines': [
|
||||
['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048567]
|
||||
]
|
||||
},
|
||||
'cluster_stats_indices_shards': {
|
||||
'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API',
|
||||
'elastic.cluster_indices_shards', 'stacked'],
|
||||
'cluster_stats_indices': {
|
||||
'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
|
||||
'elastic.cluster_indices', 'line'],
|
||||
'lines': [
|
||||
['indices_count', 'indices', 'absolute'],
|
||||
]
|
||||
},
|
||||
'cluster_stats_shards_total': {
|
||||
'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
|
||||
'elastic.cluster_shards_total', 'line'],
|
||||
'lines': [
|
||||
['indices_shards_total', 'shards', 'absolute']
|
||||
]
|
||||
},
|
||||
|
@ -450,7 +471,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'host_metrics_file_descriptors': {
|
||||
'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics',
|
||||
'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
|
||||
'elastic.host_descriptors', 'area'],
|
||||
'lines': [
|
||||
['file_descriptors_used', 'used', 'absolute', 1, 10]
|
||||
|
@ -473,9 +494,11 @@ class Service(UrlService):
|
|||
self.definitions = CHARTS
|
||||
self.host = self.configuration.get('host')
|
||||
self.port = self.configuration.get('port', 9200)
|
||||
self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'),
|
||||
host=self.host,
|
||||
port=self.port)
|
||||
self.url = '{scheme}://{host}:{port}'.format(
|
||||
scheme=self.configuration.get('scheme', 'http'),
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
)
|
||||
self.latency = dict()
|
||||
self.methods = list()
|
||||
|
||||
|
|
|
@ -7,11 +7,13 @@ from random import SystemRandom
|
|||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values
|
||||
# update_every = 4
|
||||
|
||||
priority = 90000
|
||||
|
||||
ORDER = ['random']
|
||||
ORDER = [
|
||||
'random',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'random': {
|
||||
'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
|
||||
|
|
|
@ -5,12 +5,12 @@
|
|||
|
||||
from bases.FrameworkServices.ExecutableService import ExecutableService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['qemails']
|
||||
EXIM_COMMAND = 'exim -bpc'
|
||||
|
||||
ORDER = [
|
||||
'qemails',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'qemails': {
|
||||
|
@ -25,9 +25,9 @@ CHARTS = {
|
|||
class Service(ExecutableService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
ExecutableService.__init__(self, configuration=configuration, name=name)
|
||||
self.command = 'exim -bpc'
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.command = EXIM_COMMAND
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -35,8 +35,19 @@ def charts(jails):
|
|||
},
|
||||
}
|
||||
for jail in jails:
|
||||
ch[ORDER[0]]['lines'].append([jail, jail, 'incremental'])
|
||||
ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute'])
|
||||
dim = [
|
||||
jail,
|
||||
jail,
|
||||
'incremental',
|
||||
]
|
||||
ch[ORDER[0]]['lines'].append(dim)
|
||||
|
||||
dim = [
|
||||
'{0}_in_jail'.format(jail),
|
||||
jail,
|
||||
'absolute',
|
||||
]
|
||||
ch[ORDER[1]]['lines'].append(dim)
|
||||
|
||||
return ch
|
||||
|
||||
|
@ -59,12 +70,10 @@ class Service(LogService):
|
|||
LogService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = dict()
|
||||
|
||||
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
|
||||
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
|
||||
self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
|
||||
self.exclude = self.configuration.get('exclude', str())
|
||||
|
||||
self.monitoring_jails = list()
|
||||
self.banned_ips = defaultdict(set)
|
||||
self.data = dict()
|
||||
|
|
|
@ -3,24 +3,37 @@
|
|||
# Author: l2isbad
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from re import findall
|
||||
import re
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from bases.collection import find_binary
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
priority = 60000
|
||||
update_every = 15
|
||||
|
||||
PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
|
||||
|
||||
RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct']
|
||||
RADCLIENT_RETRIES = 1
|
||||
RADCLIENT_TIMEOUT = 1
|
||||
|
||||
DEFAULT_HOST = 'localhost'
|
||||
DEFAULT_PORT = 18121
|
||||
DEFAULT_DO_ACCT = False
|
||||
DEFAULT_DO_PROXY_AUTH = False
|
||||
DEFAULT_DO_PROXY_ACCT = False
|
||||
|
||||
ORDER = [
|
||||
'authentication',
|
||||
'accounting',
|
||||
'proxy-auth',
|
||||
'proxy-acct',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'authentication': {
|
||||
'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'],
|
||||
'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
|
||||
'lines': [
|
||||
['access-accepts', None, 'incremental'],
|
||||
['access-rejects', None, 'incremental'],
|
||||
|
@ -32,7 +45,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'accounting': {
|
||||
'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'],
|
||||
'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
|
||||
'lines': [
|
||||
['accounting-requests', 'requests', 'incremental'],
|
||||
['accounting-responses', 'responses', 'incremental'],
|
||||
|
@ -44,7 +57,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'proxy-auth': {
|
||||
'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'],
|
||||
'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
|
||||
'lines': [
|
||||
['proxy-access-accepts', 'access-accepts', 'incremental'],
|
||||
['proxy-access-rejects', 'access-rejects', 'incremental'],
|
||||
|
@ -56,7 +69,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'proxy-acct': {
|
||||
'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'],
|
||||
'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
|
||||
'lines': [
|
||||
['proxy-accounting-requests', 'requests', 'incremental'],
|
||||
['proxy-accounting-responses', 'responses', 'incremental'],
|
||||
|
@ -70,46 +83,80 @@ CHARTS = {
|
|||
}
|
||||
|
||||
|
||||
def radclient_status(radclient, retries, timeout, host, port, secret):
|
||||
# radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
|
||||
|
||||
return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
|
||||
radclient=radclient,
|
||||
num_retries=retries,
|
||||
timeout=timeout,
|
||||
host=host,
|
||||
port=port,
|
||||
secret=secret,
|
||||
).split()
|
||||
|
||||
|
||||
class Service(SimpleService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.host = self.configuration.get('host', 'localhost')
|
||||
self.port = self.configuration.get('port', '18121')
|
||||
self.host = self.configuration.get('host', DEFAULT_HOST)
|
||||
self.port = self.configuration.get('port', DEFAULT_PORT)
|
||||
self.secret = self.configuration.get('secret')
|
||||
self.acct = self.configuration.get('acct', False)
|
||||
self.proxy_auth = self.configuration.get('proxy_auth', False)
|
||||
self.proxy_acct = self.configuration.get('proxy_acct', False)
|
||||
chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
|
||||
self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
|
||||
self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
|
||||
self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
|
||||
self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
|
||||
self.echo = find_binary('echo')
|
||||
self.radclient = find_binary('radclient')
|
||||
self.sub_echo = [self.echo, RADIUS_MSG]
|
||||
self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x',
|
||||
':'.join([self.host, self.port]), 'status', self.secret]
|
||||
self.sub_radclient = radclient_status(
|
||||
self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
|
||||
)
|
||||
|
||||
def check(self):
|
||||
if not all([self.echo, self.radclient]):
|
||||
self.error('Can\'t locate "radclient" binary or binary is not executable by netdata')
|
||||
if not self.radclient:
|
||||
self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
|
||||
return False
|
||||
if not self.secret:
|
||||
self.error('"secret" not set')
|
||||
|
||||
if not self.echo:
|
||||
self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
|
||||
return None
|
||||
|
||||
if self._get_raw_data():
|
||||
return True
|
||||
self.error('Request returned no data. Is server alive?')
|
||||
return False
|
||||
if not self.secret:
|
||||
self.error("'secret' isn't set")
|
||||
return None
|
||||
|
||||
def _get_data(self):
|
||||
if not self.get_raw_data():
|
||||
self.error('Request returned no data. Is server alive?')
|
||||
return False
|
||||
|
||||
if not self.do_acct:
|
||||
self.order.remove('accounting')
|
||||
|
||||
if not self.do_proxy_auth:
|
||||
self.order.remove('proxy-auth')
|
||||
|
||||
if not self.do_proxy_acct:
|
||||
self.order.remove('proxy-acct')
|
||||
|
||||
return True
|
||||
|
||||
def get_data(self):
|
||||
"""
|
||||
Format data received from shell command
|
||||
:return: dict
|
||||
"""
|
||||
result = self._get_raw_data()
|
||||
return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
|
||||
result = self.get_raw_data()
|
||||
|
||||
def _get_raw_data(self):
|
||||
if not result:
|
||||
return None
|
||||
|
||||
return dict(
|
||||
(key.lower(), value) for key, value in PARSER.findall(result)
|
||||
)
|
||||
|
||||
def get_raw_data(self):
|
||||
"""
|
||||
The following code is equivalent to
|
||||
'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
|
||||
|
@ -123,6 +170,8 @@ class Service(SimpleService):
|
|||
raw_result = process_rad.communicate()[0]
|
||||
except OSError:
|
||||
return None
|
||||
|
||||
if process_rad.returncode is 0:
|
||||
return raw_result.decode()
|
||||
|
||||
return None
|
||||
|
|
|
@ -8,13 +8,20 @@ import json
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
MEMSTATS_ORDER = [
|
||||
'memstats_heap',
|
||||
'memstats_stack',
|
||||
'memstats_mspan',
|
||||
'memstats_mcache',
|
||||
'memstats_sys',
|
||||
'memstats_live_objects',
|
||||
'memstats_gc_pauses',
|
||||
]
|
||||
|
||||
MEMSTATS_CHARTS = {
|
||||
'memstats_heap': {
|
||||
'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats',
|
||||
'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
|
||||
'expvar.memstats.heap', 'line'],
|
||||
'lines': [
|
||||
['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
|
||||
|
@ -22,21 +29,21 @@ MEMSTATS_CHARTS = {
|
|||
]
|
||||
},
|
||||
'memstats_stack': {
|
||||
'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats',
|
||||
'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
|
||||
'expvar.memstats.stack', 'line'],
|
||||
'lines': [
|
||||
['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
|
||||
]
|
||||
},
|
||||
'memstats_mspan': {
|
||||
'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats',
|
||||
'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
|
||||
'expvar.memstats.mspan', 'line'],
|
||||
'lines': [
|
||||
['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
|
||||
]
|
||||
},
|
||||
'memstats_mcache': {
|
||||
'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats',
|
||||
'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
|
||||
'expvar.memstats.mcache', 'line'],
|
||||
'lines': [
|
||||
['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
|
||||
|
@ -50,7 +57,7 @@ MEMSTATS_CHARTS = {
|
|||
]
|
||||
},
|
||||
'memstats_sys': {
|
||||
'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats',
|
||||
'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
|
||||
'expvar.memstats.sys', 'line'],
|
||||
'lines': [
|
||||
['memstats_sys', 'sys', 'absolute', 1, 1024]
|
||||
|
@ -65,9 +72,6 @@ MEMSTATS_CHARTS = {
|
|||
}
|
||||
}
|
||||
|
||||
MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
|
||||
'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
|
||||
|
||||
|
||||
def flatten(d, top='', sep='.'):
|
||||
items = []
|
||||
|
@ -83,7 +87,6 @@ def flatten(d, top='', sep='.'):
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
|
||||
# if memstats collection is enabled, add the charts and their order
|
||||
if self.configuration.get('collect_memstats'):
|
||||
self.definitions = dict(MEMSTATS_CHARTS)
|
||||
|
|
|
@ -14,11 +14,6 @@ except ImportError:
|
|||
from bases.FrameworkServices.SocketService import SocketService
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = [
|
||||
'fbin',
|
||||
|
@ -55,11 +50,11 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'fbin': {
|
||||
'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'],
|
||||
'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'fbout': {
|
||||
'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'],
|
||||
'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'fscur': {
|
||||
|
@ -100,11 +95,11 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'bbin': {
|
||||
'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'],
|
||||
'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'bbout': {
|
||||
'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'],
|
||||
'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'bscur': {
|
||||
|
@ -145,41 +140,39 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'bqtime': {
|
||||
'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend',
|
||||
'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
|
||||
'haproxy_b.qtime', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'bctime': {
|
||||
'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend',
|
||||
'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
|
||||
'haproxy_b.ctime', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'brtime': {
|
||||
'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend',
|
||||
'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
|
||||
'haproxy_b.rtime', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'bttime': {
|
||||
'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend',
|
||||
'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
|
||||
'haproxy_b.ttime', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'health_sdown': {
|
||||
'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health',
|
||||
'haproxy_hs.down', 'line'],
|
||||
'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'health_sup': {
|
||||
'options': [None, 'Backend Servers In UP State', 'health servers', 'health',
|
||||
'haproxy_hs.up', 'line'],
|
||||
'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'health_bdown': {
|
||||
'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'],
|
||||
'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'health_idle': {
|
||||
'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'],
|
||||
'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
|
||||
'lines': [
|
||||
['idle', None, 'absolute']
|
||||
]
|
||||
|
@ -213,6 +206,7 @@ REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
|
|||
socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
|
||||
|
||||
|
||||
# TODO: the code is unreadable
|
||||
class Service(UrlService, SocketService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
if 'socket' in configuration:
|
||||
|
|
|
@ -12,7 +12,9 @@ from copy import deepcopy
|
|||
from bases.FrameworkServices.SocketService import SocketService
|
||||
|
||||
|
||||
ORDER = ['temperatures']
|
||||
ORDER = [
|
||||
'temperatures',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'temperatures': {
|
||||
|
@ -39,11 +41,11 @@ class Service(SocketService):
|
|||
SocketService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = deepcopy(CHARTS)
|
||||
self.do_only = self.configuration.get('devices')
|
||||
self._keep_alive = False
|
||||
self.request = ""
|
||||
self.host = "127.0.0.1"
|
||||
self.port = 7634
|
||||
self.do_only = self.configuration.get('devices')
|
||||
|
||||
def get_disks(self):
|
||||
r = self._get_raw_data()
|
||||
|
|
|
@ -28,11 +28,15 @@ HTTP_BAD_STATUS = 'bad_status'
|
|||
HTTP_TIMEOUT = 'timeout'
|
||||
HTTP_NO_CONNECTION = 'no_connection'
|
||||
|
||||
ORDER = ['response_time', 'response_length', 'status']
|
||||
ORDER = [
|
||||
'response_time',
|
||||
'response_length',
|
||||
'status',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'response_time': {
|
||||
'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'],
|
||||
'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
|
||||
'lines': [
|
||||
[HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
|
||||
]
|
||||
|
@ -59,12 +63,12 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
pattern = self.configuration.get('regex')
|
||||
self.regex = re.compile(pattern) if pattern else None
|
||||
self.status_codes_accepted = self.configuration.get('status_accepted', [200])
|
||||
self.follow_redirect = self.configuration.get('redirect', True)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -8,10 +8,9 @@ import json
|
|||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['listeners']
|
||||
ORDER = [
|
||||
'listeners',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'listeners': {
|
||||
|
|
|
@ -7,24 +7,17 @@ import json
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# default job configuration (overridden by python.d.plugin)
|
||||
# config = {'local': {
|
||||
# 'update_every': update_every,
|
||||
# 'retries': retries,
|
||||
# 'priority': priority,
|
||||
# 'url': 'http://localhost:5001'
|
||||
# }}
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
|
||||
ORDER = [
|
||||
'bandwidth',
|
||||
'peers',
|
||||
'repo_size',
|
||||
'repo_objects',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'bandwidth': {
|
||||
'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
|
||||
'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
|
||||
'lines': [
|
||||
['in', None, 'absolute', 8, 1000],
|
||||
['out', None, 'absolute', -8, 1000]
|
||||
|
@ -37,10 +30,10 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'repo_size': {
|
||||
'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'],
|
||||
'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
|
||||
'lines': [
|
||||
['avail', None, 'absolute', 1, 1e9],
|
||||
['size', None, 'absolute', 1, 1e9],
|
||||
['avail', None, 'absolute', 1, 1 << 30],
|
||||
['size', None, 'absolute', 1, 1 << 30],
|
||||
]
|
||||
},
|
||||
'repo_objects': {
|
||||
|
@ -68,11 +61,11 @@ SI_zeroes = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.baseurl = self.configuration.get('url', 'http://localhost:5001')
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.__storage_max = None
|
||||
self.baseurl = self.configuration.get('url', 'http://localhost:5001')
|
||||
self.do_pinapi = self.configuration.get('pinapi')
|
||||
self.__storage_max = None
|
||||
|
||||
def _get_json(self, sub_url):
|
||||
"""
|
||||
|
|
|
@ -19,14 +19,16 @@ from copy import deepcopy
|
|||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
priority = 60000
|
||||
retries = 60
|
||||
|
||||
ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total']
|
||||
ORDER = [
|
||||
'pools_utilization',
|
||||
'pools_active_leases',
|
||||
'leases_total',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'pools_utilization': {
|
||||
'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'],
|
||||
'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'pools_active_leases': {
|
||||
|
@ -120,7 +122,6 @@ class Service(SimpleService):
|
|||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = deepcopy(CHARTS)
|
||||
|
||||
lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
|
||||
self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
|
||||
self.pools = list()
|
||||
|
@ -131,7 +132,7 @@ class Service(SimpleService):
|
|||
|
||||
def check(self):
|
||||
if not HAVE_IP_ADDRESS:
|
||||
self.error("'python-ipaddress' module is needed")
|
||||
self.error("'python-ipaddress' package is needed")
|
||||
return False
|
||||
|
||||
if not self.dhcpd_leases.is_valid():
|
||||
|
@ -190,6 +191,17 @@ class Service(SimpleService):
|
|||
|
||||
def create_charts(self):
|
||||
for pool in self.pools:
|
||||
self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name,
|
||||
'absolute', 1, 100])
|
||||
self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name])
|
||||
dim = [
|
||||
pool.id + '_utilization',
|
||||
pool.name,
|
||||
'absolute',
|
||||
1,
|
||||
100,
|
||||
]
|
||||
self.definitions['pools_utilization']['lines'].append(dim)
|
||||
|
||||
dim = [
|
||||
pool.id + '_active_leases',
|
||||
pool.name,
|
||||
]
|
||||
self.definitions['pools_active_leases']['lines'].append(dim)
|
||||
|
|
|
@ -16,11 +16,15 @@ update_every = 10
|
|||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = [
|
||||
'net_throughput_http', 'net_throughput_https', # net throughput
|
||||
'connections_http', 'connections_https', # connections
|
||||
'requests', 'requests_processing', # requests
|
||||
'pub_cache_hits', 'private_cache_hits', # cache
|
||||
'static_hits' # static
|
||||
'net_throughput_http', # net throughput
|
||||
'net_throughput_https', # net throughput
|
||||
'connections_http', # connections
|
||||
'connections_https', # connections
|
||||
'requests', # requests
|
||||
'requests_processing', # requests
|
||||
'pub_cache_hits', # cache
|
||||
'private_cache_hits', # cache
|
||||
'static_hits', # static
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
|
|
|
@ -8,7 +8,13 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
|
|||
priority = 59999
|
||||
disabled_by_default = True
|
||||
|
||||
ORDER = ['sessions', 'users', 'seats']
|
||||
LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
|
||||
|
||||
ORDER = [
|
||||
'sessions',
|
||||
'users',
|
||||
'seats',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'sessions': {
|
||||
|
@ -39,9 +45,9 @@ CHARTS = {
|
|||
class Service(ExecutableService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
ExecutableService.__init__(self, configuration=configuration, name=name)
|
||||
self.command = 'loginctl list-sessions --no-legend'
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.command = LOGINCTL_COMMAND
|
||||
|
||||
def _get_data(self):
|
||||
ret = {
|
||||
|
|
|
@ -66,7 +66,7 @@ def battery_charts(bats):
|
|||
charts.update(
|
||||
{
|
||||
'bbu_{0}_relative_charge'.format(b.id): {
|
||||
'options': [None, 'Relative State of Charge', '%', 'battery',
|
||||
'options': [None, 'Relative State of Charge', 'percentage', 'battery',
|
||||
'megacli.bbu_relative_charge', 'line'],
|
||||
'lines': [
|
||||
['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
|
||||
|
@ -180,8 +180,8 @@ class Service(ExecutableService):
|
|||
ExecutableService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = list()
|
||||
self.definitions = dict()
|
||||
self.megacli = Megacli()
|
||||
self.do_battery = self.configuration.get('do_battery')
|
||||
self.megacli = Megacli()
|
||||
|
||||
def check_sudo(self):
|
||||
err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
|
||||
|
|
|
@ -5,36 +5,37 @@
|
|||
|
||||
from bases.FrameworkServices.SocketService import SocketService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# default job configuration (overridden by python.d.plugin)
|
||||
# config = {'local': {
|
||||
# 'update_every': update_every,
|
||||
# 'retries': retries,
|
||||
# 'priority': priority,
|
||||
# 'host': 'localhost',
|
||||
# 'port': 11211,
|
||||
# 'unix_socket': None
|
||||
# }}
|
||||
|
||||
ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed',
|
||||
'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate']
|
||||
ORDER = [
|
||||
'cache',
|
||||
'net',
|
||||
'connections',
|
||||
'items',
|
||||
'evicted_reclaimed',
|
||||
'get',
|
||||
'get_rate',
|
||||
'set_rate',
|
||||
'cas',
|
||||
'delete',
|
||||
'increment',
|
||||
'decrement',
|
||||
'touch',
|
||||
'touch_rate',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'cache': {
|
||||
'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'],
|
||||
'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
|
||||
'lines': [
|
||||
['avail', 'available', 'absolute', 1, 1048576],
|
||||
['used', 'used', 'absolute', 1, 1048576]
|
||||
['avail', 'available', 'absolute', 1, 1 << 20],
|
||||
['used', 'used', 'absolute', 1, 1 << 20]
|
||||
]
|
||||
},
|
||||
'net': {
|
||||
'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
|
||||
'lines': [
|
||||
['bytes_read', 'in', 'incremental', 8, 1024],
|
||||
['bytes_written', 'out', 'incremental', -8, 1024]
|
||||
['bytes_read', 'in', 'incremental', 8, 1000],
|
||||
['bytes_written', 'out', 'incremental', -8, 1000],
|
||||
]
|
||||
},
|
||||
'connections': {
|
||||
|
@ -126,13 +127,13 @@ CHARTS = {
|
|||
class Service(SocketService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
SocketService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.request = 'stats\r\n'
|
||||
self.host = 'localhost'
|
||||
self.port = 11211
|
||||
self._keep_alive = True
|
||||
self.unix_socket = None
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -16,10 +16,6 @@ except ImportError:
|
|||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
retries = 60
|
||||
|
||||
REPL_SET_STATES = [
|
||||
('1', 'primary'),
|
||||
|
@ -209,21 +205,21 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'journaling_volume': {
|
||||
'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
|
||||
'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
|
||||
'mongodb.journaling_volume', 'line'],
|
||||
'lines': [
|
||||
['journaledMB', 'volume', 'absolute', 1, 100]
|
||||
]
|
||||
},
|
||||
'background_flush_average': {
|
||||
'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
|
||||
'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
|
||||
'mongodb.background_flush_average', 'line'],
|
||||
'lines': [
|
||||
['average_ms', 'time', 'absolute', 1, 100]
|
||||
]
|
||||
},
|
||||
'background_flush_last': {
|
||||
'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
|
||||
'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
|
||||
'mongodb.background_flush_last', 'line'],
|
||||
'lines': [
|
||||
['last_ms', 'time', 'absolute', 1, 100]
|
||||
|
@ -269,7 +265,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'memory': {
|
||||
'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
|
||||
'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
|
||||
'lines': [
|
||||
['virtual', None, 'absolute', 1, 1],
|
||||
['resident', None, 'absolute', 1, 1],
|
||||
|
@ -313,7 +309,7 @@ CHARTS = {
|
|||
},
|
||||
'wiredtiger_cache': {
|
||||
'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
|
||||
'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
|
||||
'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
|
||||
'lines': [
|
||||
['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
|
||||
['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
|
||||
|
@ -333,14 +329,14 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'tcmalloc_generic': {
|
||||
'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
|
||||
'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
|
||||
'lines': [
|
||||
['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
|
||||
['heap_size', 'heap_size', 'absolute', 1, 1048576]
|
||||
['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
|
||||
['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
|
||||
]
|
||||
},
|
||||
'tcmalloc_metrics': {
|
||||
'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
|
||||
'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
|
||||
'lines': [
|
||||
['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
|
||||
['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
|
||||
|
|
|
@ -6,12 +6,20 @@
|
|||
import xml.etree.ElementTree as ET
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
|
||||
MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net']
|
||||
MONIT_SERVICE_NAMES = [
|
||||
'Filesystem',
|
||||
'Directory',
|
||||
'File',
|
||||
'Process',
|
||||
'Host',
|
||||
'System',
|
||||
'Fifo',
|
||||
'Program',
|
||||
'Net',
|
||||
]
|
||||
|
||||
DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
|
@ -89,10 +97,10 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
base_url = self.configuration.get('url', 'http://localhost:2812')
|
||||
self.url = '{0}/_status?format=xml&level=full'.format(base_url)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
base_url = self.configuration.get('url', 'http://localhost:2812')
|
||||
self.url = '{0}/_status?format=xml&level=full'.format(base_url)
|
||||
|
||||
def parse(self, data):
|
||||
try:
|
||||
|
@ -104,15 +112,19 @@ class Service(UrlService):
|
|||
|
||||
def check(self):
|
||||
self._manager = self._build_manager()
|
||||
|
||||
raw_data = self._get_raw_data()
|
||||
if not raw_data:
|
||||
return None
|
||||
|
||||
return bool(self.parse(raw_data))
|
||||
|
||||
def _get_data(self):
|
||||
raw_data = self._get_raw_data()
|
||||
|
||||
if not raw_data:
|
||||
return None
|
||||
|
||||
xml = self.parse(raw_data)
|
||||
if not xml:
|
||||
return None
|
||||
|
@ -120,6 +132,7 @@ class Service(UrlService):
|
|||
data = {}
|
||||
for service_id in DEFAULT_SERVICES_IDS:
|
||||
service_category = MONIT_SERVICE_NAMES[service_id].lower()
|
||||
|
||||
if service_category == 'system':
|
||||
self.debug("Skipping service from 'System' category, because it's useless in graphs")
|
||||
continue
|
||||
|
|
|
@ -6,9 +6,6 @@
|
|||
|
||||
from bases.FrameworkServices.MySQLService import MySQLService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 3
|
||||
priority = 60000
|
||||
|
||||
# query executed on MySQL server
|
||||
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
|
||||
|
@ -207,8 +204,8 @@ CHARTS = {
|
|||
'net': {
|
||||
'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
|
||||
'lines': [
|
||||
['Bytes_received', 'in', 'incremental', 8, 1024],
|
||||
['Bytes_sent', 'out', 'incremental', -8, 1024]
|
||||
['Bytes_received', 'in', 'incremental', 8, 1000],
|
||||
['Bytes_sent', 'out', 'incremental', -8, 1000]
|
||||
]
|
||||
},
|
||||
'queries': {
|
||||
|
@ -320,7 +317,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'innodb_io': {
|
||||
'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
|
||||
'options': [None, 'mysql InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
|
||||
'lines': [
|
||||
['Innodb_data_read', 'read', 'incremental', 1, 1024],
|
||||
['Innodb_data_written', 'write', 'incremental', -1, 1024]
|
||||
|
@ -360,7 +357,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'innodb_os_log_io': {
|
||||
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
|
||||
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
|
||||
'lines': [
|
||||
['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
|
||||
]
|
||||
|
@ -394,7 +391,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'innodb_buffer_pool_bytes': {
|
||||
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
|
||||
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
|
||||
'lines': [
|
||||
['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
|
||||
['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
|
||||
|
@ -441,7 +438,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'qcache_freemem': {
|
||||
'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
|
||||
'options': [None, 'mysql QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
|
||||
'lines': [
|
||||
['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
|
||||
]
|
||||
|
@ -529,7 +526,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'galera_bytes': {
|
||||
'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'],
|
||||
'options': [None, 'Replicated bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
|
||||
'lines': [
|
||||
['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
|
||||
['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
|
||||
|
@ -563,7 +560,11 @@ class Service(MySQLService):
|
|||
MySQLService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES)
|
||||
self.queries = dict(
|
||||
global_status=QUERY_GLOBAL,
|
||||
slave_status=QUERY_SLAVE,
|
||||
variables=QUERY_VARIABLES,
|
||||
)
|
||||
|
||||
def _get_data(self):
|
||||
|
||||
|
|
|
@ -5,37 +5,30 @@
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# default job configuration (overridden by python.d.plugin)
|
||||
# config = {'local': {
|
||||
# 'update_every': update_every,
|
||||
# 'retries': retries,
|
||||
# 'priority': priority,
|
||||
# 'url': 'http://localhost/stub_status'
|
||||
# }}
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
|
||||
ORDER = [
|
||||
'connections',
|
||||
'requests',
|
||||
'connection_status',
|
||||
'connect_rate',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'connections': {
|
||||
'options': [None, 'nginx Active Connections', 'connections', 'active connections',
|
||||
'options': [None, 'Active Connections', 'connections', 'active connections',
|
||||
'nginx.connections', 'line'],
|
||||
'lines': [
|
||||
['active']
|
||||
]
|
||||
},
|
||||
'requests': {
|
||||
'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
|
||||
'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
|
||||
'lines': [
|
||||
['requests', None, 'incremental']
|
||||
]
|
||||
},
|
||||
'connection_status': {
|
||||
'options': [None, 'nginx Active Connections by Status', 'connections', 'status',
|
||||
'options': [None, 'Active Connections by Status', 'connections', 'status',
|
||||
'nginx.connection_status', 'line'],
|
||||
'lines': [
|
||||
['reading'],
|
||||
|
@ -44,7 +37,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'connect_rate': {
|
||||
'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate',
|
||||
'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
|
||||
'nginx.connect_rate', 'line'],
|
||||
'lines': [
|
||||
['accepts', 'accepted', 'incremental'],
|
||||
|
@ -57,9 +50,9 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.url = self.configuration.get('url', 'http://localhost/stub_status')
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.url = self.configuration.get('url', 'http://localhost/stub_status')
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -16,11 +16,7 @@ except ImportError:
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 1
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = [
|
||||
'requests_total',
|
||||
'requests_current',
|
||||
|
@ -75,7 +71,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'ssl_memory_usage': {
|
||||
'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
|
||||
'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
|
||||
'lines': [
|
||||
['ssl_memory_usage', 'usage', 'absolute', 1, 100]
|
||||
]
|
||||
|
@ -94,7 +90,7 @@ def cache_charts(cache):
|
|||
charts = OrderedDict()
|
||||
|
||||
charts['{0}_traffic'.format(cache.name)] = {
|
||||
'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'],
|
||||
'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
|
||||
'lines': [
|
||||
['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
|
||||
['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
|
||||
|
@ -102,7 +98,7 @@ def cache_charts(cache):
|
|||
]
|
||||
}
|
||||
charts['{0}_memory_usage'.format(cache.name)] = {
|
||||
'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'],
|
||||
'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
|
||||
'lines': [
|
||||
['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
|
||||
]
|
||||
|
@ -199,7 +195,8 @@ def web_upstream_charts(wu):
|
|||
'lines': dimensions('active')
|
||||
}
|
||||
charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
|
||||
'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'],
|
||||
'options': [None, 'Peers Connections Usage', 'percentage', family,
|
||||
'nginx_plus.web_upstream_connections_usage', 'line'],
|
||||
'lines': dimensions('connections_usage', d=100)
|
||||
}
|
||||
# Traffic
|
||||
|
@ -222,7 +219,7 @@ def web_upstream_charts(wu):
|
|||
# Response Time
|
||||
for peer in wu:
|
||||
charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
|
||||
'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family,
|
||||
'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
|
||||
'nginx_plus.web_upstream_peer_timings', 'line'],
|
||||
'lines': [
|
||||
['_'.join([wu.name, peer.server, 'header_time']), 'header'],
|
||||
|
@ -231,7 +228,7 @@ def web_upstream_charts(wu):
|
|||
}
|
||||
# Memory Usage
|
||||
charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
|
||||
'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
|
||||
'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
|
||||
'lines': [
|
||||
['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
|
||||
]
|
||||
|
|
|
@ -7,12 +7,20 @@ import re
|
|||
|
||||
from bases.FrameworkServices.ExecutableService import ExecutableService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
priority = 60000
|
||||
|
||||
update_every = 30
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode']
|
||||
NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
|
||||
REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
|
||||
|
||||
ORDER = [
|
||||
'queries',
|
||||
'zones',
|
||||
'protocol',
|
||||
'type',
|
||||
'transfer',
|
||||
'rcode',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'queries': {
|
||||
|
@ -78,22 +86,21 @@ CHARTS = {
|
|||
|
||||
class Service(ExecutableService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
ExecutableService.__init__(
|
||||
self, configuration=configuration, name=name)
|
||||
self.command = 'nsd-control stats_noreset'
|
||||
ExecutableService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
|
||||
self.command = NSD_CONTROL_COMMAND
|
||||
|
||||
def _get_data(self):
|
||||
lines = self._get_raw_data()
|
||||
if not lines:
|
||||
return None
|
||||
|
||||
r = self.regex
|
||||
stats = dict((k.replace('.', '_'), int(v))
|
||||
for k, v in r.findall(''.join(lines)))
|
||||
stats = dict(
|
||||
(k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
|
||||
)
|
||||
stats.setdefault('num_opcode_NOTIFY', 0)
|
||||
stats.setdefault('num_type_TYPE252', 0)
|
||||
stats.setdefault('num_type_TYPE255', 0)
|
||||
|
||||
return stats
|
||||
|
|
|
@ -9,9 +9,6 @@ import re
|
|||
|
||||
from bases.FrameworkServices.SocketService import SocketService
|
||||
|
||||
# default module values
|
||||
update_every = 1
|
||||
priority = 60000
|
||||
|
||||
# NTP Control Message Protocol constants
|
||||
MODE = 6
|
||||
|
@ -53,13 +50,15 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'sys_offset': {
|
||||
'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'],
|
||||
'options': [None, 'Combined offset of server relative to this host', 'milliseconds',
|
||||
'system', 'ntpd.sys_offset', 'area'],
|
||||
'lines': [
|
||||
['offset', 'offset', 'absolute', 1, PRECISION]
|
||||
]
|
||||
},
|
||||
'sys_jitter': {
|
||||
'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'],
|
||||
'options': [None, 'Combined system jitter and clock jitter', 'milliseconds',
|
||||
'system', 'ntpd.sys_jitter', 'line'],
|
||||
'lines': [
|
||||
['sys_jitter', 'system', 'absolute', 1, PRECISION],
|
||||
['clk_jitter', 'clock', 'absolute', 1, PRECISION]
|
||||
|
@ -78,14 +77,14 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'sys_rootdelay': {
|
||||
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system',
|
||||
'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system',
|
||||
'ntpd.sys_rootdelay', 'area'],
|
||||
'lines': [
|
||||
['rootdelay', 'delay', 'absolute', 1, PRECISION]
|
||||
]
|
||||
},
|
||||
'sys_rootdisp': {
|
||||
'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system',
|
||||
'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system',
|
||||
'ntpd.sys_rootdisp', 'area'],
|
||||
'lines': [
|
||||
['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
|
||||
|
@ -114,27 +113,27 @@ CHARTS = {
|
|||
|
||||
PEER_CHARTS = {
|
||||
'peer_offset': {
|
||||
'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'],
|
||||
'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'peer_delay': {
|
||||
'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'],
|
||||
'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'peer_dispersion': {
|
||||
'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'],
|
||||
'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'peer_jitter': {
|
||||
'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'],
|
||||
'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'peer_xleave': {
|
||||
'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'],
|
||||
'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'peer_rootdelay': {
|
||||
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers',
|
||||
'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers',
|
||||
'ntpd.peer_rootdelay', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
|
@ -234,7 +233,6 @@ class Service(SocketService):
|
|||
SocketService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = list(ORDER)
|
||||
self.definitions = dict(CHARTS)
|
||||
|
||||
self.port = 'ntp'
|
||||
self.dgram_socket = True
|
||||
self.system = System()
|
||||
|
@ -243,7 +241,6 @@ class Service(SocketService):
|
|||
self.retries = 0
|
||||
self.show_peers = self.configuration.get('show_peers', False)
|
||||
self.peer_rescan = self.configuration.get('peer_rescan', 60)
|
||||
|
||||
if self.show_peers:
|
||||
self.definitions.update(PEER_CHARTS)
|
||||
|
||||
|
|
|
@ -49,39 +49,39 @@ def gpu_charts(gpu):
|
|||
|
||||
charts = {
|
||||
PCI_BANDWIDTH: {
|
||||
'options': [None, 'PCI Express Bandwidth Utilization', 'KB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
|
||||
'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
|
||||
'lines': [
|
||||
['rx_util', 'rx', 'absolute', 1, 1],
|
||||
['tx_util', 'tx', 'absolute', 1, -1],
|
||||
]
|
||||
},
|
||||
FAN_SPEED: {
|
||||
'options': [None, 'Fan Speed', '%', fam, 'nvidia_smi.fan_speed', 'line'],
|
||||
'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
|
||||
'lines': [
|
||||
['fan_speed', 'speed'],
|
||||
]
|
||||
},
|
||||
GPU_UTIL: {
|
||||
'options': [None, 'GPU Utilization', '%', fam, 'nvidia_smi.gpu_utilization', 'line'],
|
||||
'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
|
||||
'lines': [
|
||||
['gpu_util', 'utilization'],
|
||||
]
|
||||
},
|
||||
MEM_UTIL: {
|
||||
'options': [None, 'Memory Bandwidth Utilization', '%', fam, 'nvidia_smi.mem_utilization', 'line'],
|
||||
'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
|
||||
'lines': [
|
||||
['memory_util', 'utilization'],
|
||||
]
|
||||
},
|
||||
ENCODER_UTIL: {
|
||||
'options': [None, 'Encoder/Decoder Utilization', '%', fam, 'nvidia_smi.encoder_utilization', 'line'],
|
||||
'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
|
||||
'lines': [
|
||||
['encoder_util', 'encoder'],
|
||||
['decoder_util', 'decoder'],
|
||||
]
|
||||
},
|
||||
MEM_ALLOCATED: {
|
||||
'options': [None, 'Memory Allocated', 'MB', fam, 'nvidia_smi.memory_allocated', 'line'],
|
||||
'options': [None, 'Memory Allocated', 'MiB', fam, 'nvidia_smi.memory_allocated', 'line'],
|
||||
'lines': [
|
||||
['fb_memory_usage', 'used'],
|
||||
]
|
||||
|
@ -316,7 +316,6 @@ class Service(SimpleService):
|
|||
super(Service, self).__init__(configuration=configuration, name=name)
|
||||
self.order = list()
|
||||
self.definitions = dict()
|
||||
|
||||
poll = int(configuration.get('poll_seconds', 1))
|
||||
self.poller = NvidiaSMIPoller(poll)
|
||||
|
||||
|
|
|
@ -11,8 +11,6 @@ except ImportError:
|
|||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
priority = 60000
|
||||
|
||||
DEFAULT_SERVER = 'localhost'
|
||||
DEFAULT_PORT = '389'
|
||||
|
@ -36,7 +34,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'bytes_sent': {
|
||||
'options': [None, 'Traffic', 'KB/s', 'ldap', 'openldap.traffic_stats', 'line'],
|
||||
'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
|
||||
'lines': [
|
||||
['bytes_sent', 'sent', 'incremental', 1, 1024]
|
||||
]
|
||||
|
@ -136,13 +134,11 @@ class Service(SimpleService):
|
|||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
|
||||
self.server = configuration.get('server', DEFAULT_SERVER)
|
||||
self.port = configuration.get('port', DEFAULT_PORT)
|
||||
self.username = configuration.get('username')
|
||||
self.password = configuration.get('password')
|
||||
self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
|
||||
|
||||
self.alive = False
|
||||
self.conn = None
|
||||
|
||||
|
|
|
@ -3,14 +3,18 @@
|
|||
# Author: l2isbad
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from re import compile as r_compile
|
||||
import re
|
||||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
priority = 60000
|
||||
|
||||
update_every = 10
|
||||
|
||||
ORDER = ['users', 'traffic']
|
||||
ORDER = [
|
||||
'users',
|
||||
'traffic',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'users': {
|
||||
'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
|
||||
|
@ -19,15 +23,20 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'traffic': {
|
||||
'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'],
|
||||
'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'],
|
||||
'lines': [
|
||||
['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10]
|
||||
['bytes_in', 'in', 'incremental', 1, 1 << 10],
|
||||
['bytes_out', 'out', 'incremental', -1, 1 << 10]
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
TLS_REGEX = r_compile(r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)')
|
||||
STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)')
|
||||
TLS_REGEX = re.compile(
|
||||
r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
|
||||
)
|
||||
STATIC_KEY_REGEX = re.compile(
|
||||
r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'
|
||||
)
|
||||
|
||||
|
||||
class Service(SimpleService):
|
||||
|
|
|
@ -9,19 +9,8 @@ import re
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# default job configuration (overridden by python.d.plugin)
|
||||
# config = {'local': {
|
||||
# 'update_every': update_every,
|
||||
# 'retries': retries,
|
||||
# 'priority': priority,
|
||||
# 'url': 'http://localhost/status?full&json'
|
||||
# }}
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
|
||||
|
||||
POOL_INFO = [
|
||||
('active processes', 'active'),
|
||||
|
@ -49,7 +38,14 @@ CALC = [
|
|||
('avg', average)
|
||||
]
|
||||
|
||||
ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem']
|
||||
ORDER = [
|
||||
'connections',
|
||||
'requests',
|
||||
'performance',
|
||||
'request_duration',
|
||||
'request_cpu',
|
||||
'request_mem',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'connections': {
|
||||
|
@ -84,7 +80,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'request_cpu': {
|
||||
'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'],
|
||||
'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
|
||||
'lines': [
|
||||
['minReqCpu', 'min'],
|
||||
['maxReqCpu', 'max'],
|
||||
|
@ -92,7 +88,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'request_mem': {
|
||||
'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'],
|
||||
'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
|
||||
'lines': [
|
||||
['minReqMem', 'min', 'absolute', 1, 1024],
|
||||
['maxReqMem', 'max', 'absolute', 1, 1024],
|
||||
|
@ -105,14 +101,14 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.url = self.configuration.get('url', 'http://localhost/status?full&json')
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
|
||||
self.url = self.configuration.get('url', 'http://localhost/status?full&json')
|
||||
self.json = '&json' in self.url or '?json' in self.url
|
||||
self.json_full = self.url.endswith(('?full&json', '?json&full'))
|
||||
self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC
|
||||
for metric, p_name in PER_PROCESS_INFO])
|
||||
self.if_all_processes_running = dict(
|
||||
[(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
|
||||
)
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
@ -123,7 +119,7 @@ class Service(UrlService):
|
|||
if not raw:
|
||||
return None
|
||||
|
||||
raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw)
|
||||
raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
|
||||
|
||||
# Per Pool info: active connections, requests and performance charts
|
||||
to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
|
||||
|
@ -159,7 +155,7 @@ def fetch_data_(raw_data, metrics_list, pid=''):
|
|||
return result
|
||||
|
||||
|
||||
def parse_raw_data_(is_json, regex, raw_data):
|
||||
def parse_raw_data_(is_json, raw_data):
|
||||
"""
|
||||
:param is_json: bool
|
||||
:param regex: compiled regular expr
|
||||
|
@ -173,4 +169,4 @@ def parse_raw_data_(is_json, regex, raw_data):
|
|||
return dict()
|
||||
else:
|
||||
raw_data = ' '.join(raw_data.split())
|
||||
return dict(regex.findall(raw_data))
|
||||
return dict(REGEX.findall(raw_data))
|
||||
|
|
|
@ -12,8 +12,6 @@ except ImportError:
|
|||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
priority = 60000
|
||||
|
||||
PORT_LATENCY = 'connect'
|
||||
|
||||
|
@ -25,7 +23,7 @@ ORDER = ['latency', 'status']
|
|||
|
||||
CHARTS = {
|
||||
'latency': {
|
||||
'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'],
|
||||
'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
|
||||
'lines': [
|
||||
[PORT_LATENCY, 'connect', 'absolute', 100, 1000]
|
||||
]
|
||||
|
|
|
@ -5,12 +5,12 @@
|
|||
|
||||
from bases.FrameworkServices.ExecutableService import ExecutableService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
POSTQUEUE_COMMAND = 'postqueue -p'
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['qemails', 'qsize']
|
||||
ORDER = [
|
||||
'qemails',
|
||||
'qsize',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'qemails': {
|
||||
|
@ -20,7 +20,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'qsize': {
|
||||
'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'],
|
||||
'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
|
||||
'lines': [
|
||||
['size', None, 'absolute']
|
||||
]
|
||||
|
@ -31,9 +31,9 @@ CHARTS = {
|
|||
class Service(ExecutableService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
ExecutableService.__init__(self, configuration=configuration, name=name)
|
||||
self.command = 'postqueue -p'
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.command = POSTQUEUE_COMMAND
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -636,7 +636,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'db_stat_temp_bytes': {
|
||||
'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
|
||||
'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
|
||||
'line'],
|
||||
'lines': [
|
||||
['temp_bytes', 'size', 'incremental', 1, 1024]
|
||||
|
@ -650,7 +650,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'database_size': {
|
||||
'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
|
||||
'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'],
|
||||
'lines': [
|
||||
]
|
||||
},
|
||||
|
@ -669,7 +669,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'index_size': {
|
||||
'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
|
||||
'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'],
|
||||
'lines': [
|
||||
['index_size', 'size', 'absolute', 1, 1024 * 1024]
|
||||
]
|
||||
|
@ -681,7 +681,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'table_size': {
|
||||
'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
|
||||
'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 'line'],
|
||||
'lines': [
|
||||
['table_size', 'size', 'absolute', 1, 1024 * 1024]
|
||||
]
|
||||
|
@ -695,7 +695,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'wal_writes': {
|
||||
'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
|
||||
'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'],
|
||||
'lines': [
|
||||
['wal_writes', 'writes', 'incremental', 1, 1024]
|
||||
]
|
||||
|
@ -716,20 +716,20 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'stat_bgwriter_alloc': {
|
||||
'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
|
||||
'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
|
||||
'lines': [
|
||||
['buffers_alloc', 'alloc', 'incremental', 1, 1024]
|
||||
]
|
||||
},
|
||||
'stat_bgwriter_checkpoint': {
|
||||
'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter',
|
||||
'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter',
|
||||
'postgres.stat_bgwriter_checkpoint', 'line'],
|
||||
'lines': [
|
||||
['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
|
||||
]
|
||||
},
|
||||
'stat_bgwriter_backend': {
|
||||
'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter',
|
||||
'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter',
|
||||
'postgres.stat_bgwriter_backend', 'line'],
|
||||
'lines': [
|
||||
['buffers_backend', 'backend', 'incremental', 1, 1024]
|
||||
|
@ -742,7 +742,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'stat_bgwriter_bgwriter': {
|
||||
'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter',
|
||||
'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter',
|
||||
'postgres.bgwriter_bgwriter', 'line'],
|
||||
'lines': [
|
||||
['buffers_clean', 'clean', 'incremental', 1, 1024]
|
||||
|
@ -766,7 +766,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'standby_delta': {
|
||||
'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
|
||||
'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'],
|
||||
'lines': [
|
||||
['sent_delta', 'sent delta', 'absolute', 1, 1024],
|
||||
['write_delta', 'write delta', 'absolute', 1, 1024],
|
||||
|
@ -789,24 +789,19 @@ class Service(SimpleService):
|
|||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = list(ORDER)
|
||||
self.definitions = deepcopy(CHARTS)
|
||||
|
||||
self.do_table_stats = configuration.pop('table_stats', False)
|
||||
self.do_index_stats = configuration.pop('index_stats', False)
|
||||
self.databases_to_poll = configuration.pop('database_poll', None)
|
||||
self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT)
|
||||
self.configuration = configuration
|
||||
|
||||
self.conn = None
|
||||
self.server_version = None
|
||||
self.is_superuser = False
|
||||
self.alive = False
|
||||
|
||||
self.databases = list()
|
||||
self.secondaries = list()
|
||||
self.replication_slots = list()
|
||||
|
||||
self.queries = dict()
|
||||
|
||||
self.data = dict()
|
||||
|
||||
def reconnect(self):
|
||||
|
|
|
@ -8,9 +8,14 @@ from json import loads
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
priority = 60000
|
||||
|
||||
ORDER = ['questions', 'cache_usage', 'cache_size', 'latency']
|
||||
ORDER = [
|
||||
'questions',
|
||||
'cache_usage',
|
||||
'cache_size',
|
||||
'latency',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'questions': {
|
||||
'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],
|
||||
|
|
|
@ -5,10 +5,6 @@
|
|||
|
||||
from bases.FrameworkServices.MySQLService import MySQLService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 3
|
||||
priority = 60000
|
||||
|
||||
|
||||
def query(table, *params):
|
||||
return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
|
||||
|
@ -132,8 +128,8 @@ CHARTS = {
|
|||
'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
|
||||
'proxysql.pool_overall_net', 'area'],
|
||||
'lines': [
|
||||
['bytes_data_recv', 'in', 'incremental', 8, 1024],
|
||||
['bytes_data_sent', 'out', 'incremental', -8, 1024]
|
||||
['bytes_data_recv', 'in', 'incremental', 8, 1000],
|
||||
['bytes_data_sent', 'out', 'incremental', -8, 1000]
|
||||
]
|
||||
},
|
||||
'questions': {
|
||||
|
@ -155,7 +151,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'pool_latency': {
|
||||
'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'],
|
||||
'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
|
||||
'lines': []
|
||||
},
|
||||
'connections': {
|
||||
|
@ -193,7 +189,7 @@ CHARTS = {
|
|||
'lines': []
|
||||
},
|
||||
'commands_duration': {
|
||||
'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'],
|
||||
'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
|
||||
'lines': []
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,28 +11,31 @@
|
|||
# and tls_cert_file options then.
|
||||
#
|
||||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
from json import loads
|
||||
import socket
|
||||
|
||||
from json import loads
|
||||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
update_every = 5
|
||||
priority = 60000
|
||||
|
||||
|
||||
MB = 1048576
|
||||
MiB = 1 << 20
|
||||
CPU_SCALE = 1000
|
||||
|
||||
ORDER = [
|
||||
'jvm_heap',
|
||||
'jvm_nonheap',
|
||||
'cpu',
|
||||
'fd_open',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'jvm_heap': {
|
||||
'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
|
||||
'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
|
||||
'lines': [
|
||||
['jvm_heap_committed', 'committed', 'absolute', 1, MB],
|
||||
['jvm_heap_used', 'used', 'absolute', 1, MB],
|
||||
['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
|
||||
['jvm_heap_used', 'used', 'absolute', 1, MiB],
|
||||
],
|
||||
'variables': [
|
||||
['jvm_heap_max'],
|
||||
|
@ -40,10 +43,10 @@ CHARTS = {
|
|||
],
|
||||
},
|
||||
'jvm_nonheap': {
|
||||
'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'],
|
||||
'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
|
||||
'lines': [
|
||||
['jvm_nonheap_committed', 'committed', 'absolute', 1, MB],
|
||||
['jvm_nonheap_used', 'used', 'absolute', 1, MB],
|
||||
['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
|
||||
['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
|
||||
],
|
||||
'variables': [
|
||||
['jvm_nonheap_max'],
|
||||
|
@ -72,9 +75,9 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.url = 'https://{0}:8140'.format(socket.getfqdn())
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.url = 'https://{0}:8140'.format(socket.getfqdn())
|
||||
|
||||
def _get_data(self):
|
||||
# NOTE: there are several ways to retrieve data
|
||||
|
|
|
@ -14,9 +14,6 @@ except ImportError:
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 1
|
||||
priority = 60000
|
||||
|
||||
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
|
||||
|
||||
|
@ -63,15 +60,15 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'memory': {
|
||||
'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'area'],
|
||||
'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'],
|
||||
'lines': [
|
||||
['mem_used', 'used', 'absolute', 1, 1024 << 10]
|
||||
['mem_used', 'used', 'absolute', 1, 1 << 20]
|
||||
]
|
||||
},
|
||||
'disk_space': {
|
||||
'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'area'],
|
||||
'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'],
|
||||
'lines': [
|
||||
['disk_free', 'free', 'absolute', 1, 1024 ** 3]
|
||||
['disk_free', 'free', 'absolute', 1, 1 << 30]
|
||||
]
|
||||
},
|
||||
'socket_descriptors': {
|
||||
|
|
|
@ -47,13 +47,13 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'hit_rate': {
|
||||
'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'],
|
||||
'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
|
||||
'lines': [
|
||||
['hit_rate', 'rate', 'absolute']
|
||||
]
|
||||
},
|
||||
'memory': {
|
||||
'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'],
|
||||
'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
|
||||
'lines': [
|
||||
['used_memory', 'total', 'absolute', 1, 1024],
|
||||
['used_memory_lua', 'lua', 'absolute', 1, 1024]
|
||||
|
@ -62,8 +62,8 @@ CHARTS = {
|
|||
'net': {
|
||||
'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
|
||||
'lines': [
|
||||
['total_net_input_bytes', 'in', 'incremental', 8, 1024],
|
||||
['total_net_output_bytes', 'out', 'incremental', -8, 1024]
|
||||
['total_net_input_bytes', 'in', 'incremental', 8, 1000],
|
||||
['total_net_output_bytes', 'out', 'incremental', -8, 1000]
|
||||
]
|
||||
},
|
||||
'keys_redis': {
|
||||
|
@ -146,16 +146,13 @@ RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
|
|||
class Service(SocketService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
SocketService.__init__(self, configuration=configuration, name=name)
|
||||
self._keep_alive = True
|
||||
|
||||
self.order = list()
|
||||
self.definitions = dict()
|
||||
|
||||
self._keep_alive = True
|
||||
self.host = self.configuration.get('host', 'localhost')
|
||||
self.port = self.configuration.get('port', 6379)
|
||||
self.unix_socket = self.configuration.get('socket')
|
||||
p = self.configuration.get('pass')
|
||||
|
||||
self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
|
||||
self.request = 'INFO\r\n'.encode()
|
||||
self.bgsave_time = 0
|
||||
|
|
|
@ -136,13 +136,11 @@ class Service(SimpleService):
|
|||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = list(ORDER)
|
||||
self.definitions = cluster_charts()
|
||||
|
||||
self.host = self.configuration.get('host', '127.0.0.1')
|
||||
self.port = self.configuration.get('port', 28015)
|
||||
self.user = self.configuration.get('user', 'admin')
|
||||
self.password = self.configuration.get('password')
|
||||
self.timeout = self.configuration.get('timeout', 2)
|
||||
|
||||
self.conn = None
|
||||
self.alive = True
|
||||
|
||||
|
|
|
@ -7,25 +7,25 @@ import json
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['bandwidth', 'peers', 'dht']
|
||||
ORDER = [
|
||||
'bandwidth',
|
||||
'peers',
|
||||
'dht',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'bandwidth': {
|
||||
'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
|
||||
'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
|
||||
'lines': [
|
||||
['bandwidth_up_kb', 'Upload'],
|
||||
['bandwidth_up_kb', 'Upload'],
|
||||
['bandwidth_down_kb', 'Download']
|
||||
]
|
||||
},
|
||||
'peers': {
|
||||
'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
|
||||
'lines': [
|
||||
['peers_all', 'All friends'],
|
||||
['peers_all', 'All friends'],
|
||||
['peers_connected', 'Connected friends']
|
||||
]
|
||||
},
|
||||
|
@ -33,7 +33,7 @@ CHARTS = {
|
|||
'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
|
||||
'lines': [
|
||||
['dht_size_all', 'DHT nodes estimated'],
|
||||
['dht_size_rs', 'RS nodes estimated']
|
||||
['dht_size_rs', 'RS nodes estimated']
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -42,9 +42,9 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.baseurl = self.configuration.get('url', 'http://localhost:9090')
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.baseurl = self.configuration.get('url', 'http://localhost:9090')
|
||||
|
||||
def _get_stats(self):
|
||||
"""
|
||||
|
|
|
@ -24,9 +24,7 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
|
|||
|
||||
disabled_by_default = True
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 5
|
||||
priority = 60000
|
||||
|
||||
ORDER = [
|
||||
'syscall_rw',
|
||||
|
@ -40,14 +38,14 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'syscall_rw': {
|
||||
'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'],
|
||||
'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
|
||||
'lines': [
|
||||
['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
|
||||
['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
|
||||
]
|
||||
},
|
||||
'smb2_rw': {
|
||||
'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'],
|
||||
'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
|
||||
'lines': [
|
||||
['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
|
||||
['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
|
||||
|
|
|
@ -7,8 +7,6 @@ from third_party import lm_sensors as sensors
|
|||
|
||||
from bases.FrameworkServices.SimpleService import SimpleService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
|
||||
ORDER = [
|
||||
'temperature',
|
||||
|
|
|
@ -268,7 +268,7 @@ CHARTS = {
|
|||
'algo': INCREMENTAL,
|
||||
},
|
||||
'reserved_block_count': {
|
||||
'options': [None, 'Reserved Block Count', '%', 'wear', 'smartd_log.reserved_block_count', 'line'],
|
||||
'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
|
||||
'lines': [],
|
||||
'attrs': [ATTR170],
|
||||
'algo': ABSOLUTE,
|
||||
|
@ -321,7 +321,7 @@ CHARTS = {
|
|||
|
||||
},
|
||||
'percent_lifetime_used': {
|
||||
'options': [None, 'Percent Lifetime Used', '%', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
|
||||
'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
|
||||
'lines': [],
|
||||
'attrs': [ATTR202],
|
||||
'algo': ABSOLUTE,
|
||||
|
@ -586,11 +586,9 @@ class Service(SimpleService):
|
|||
SimpleService.__init__(self, configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = deepcopy(CHARTS)
|
||||
|
||||
self.log_path = configuration.get('log_path', DEF_PATH)
|
||||
self.age = configuration.get('age', DEF_AGE)
|
||||
self.exclude = configuration.get('exclude_disks', str()).split()
|
||||
|
||||
self.disks = list()
|
||||
self.runs = 0
|
||||
|
||||
|
|
|
@ -16,7 +16,10 @@ update_every = 5
|
|||
|
||||
PRECISION = 100
|
||||
|
||||
ORDER = ['tps', 'users']
|
||||
ORDER = [
|
||||
'tps',
|
||||
'users',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'tps': {
|
||||
|
|
|
@ -6,12 +6,14 @@
|
|||
import json
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
|
||||
DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap']
|
||||
DEFAULT_ORDER = [
|
||||
'response_code',
|
||||
'threads',
|
||||
'gc_time',
|
||||
'gc_ope',
|
||||
'heap',
|
||||
]
|
||||
|
||||
DEFAULT_CHARTS = {
|
||||
'response_code': {
|
||||
|
@ -59,7 +61,7 @@ DEFAULT_CHARTS = {
|
|||
]
|
||||
},
|
||||
'heap': {
|
||||
'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"],
|
||||
'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
|
||||
'lines': [
|
||||
["heap_committed", 'committed', "absolute"],
|
||||
["heap_used", 'used', "absolute"],
|
||||
|
|
|
@ -6,12 +6,12 @@
|
|||
from bases.FrameworkServices.SocketService import SocketService
|
||||
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests']
|
||||
ORDER = [
|
||||
'clients_net',
|
||||
'clients_requests',
|
||||
'servers_net',
|
||||
'servers_requests',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'clients_net': {
|
||||
|
|
|
@ -8,12 +8,18 @@ import xml.etree.ElementTree as ET
|
|||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
MiB = 1 << 20
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured']
|
||||
ORDER = [
|
||||
'accesses',
|
||||
'bandwidth',
|
||||
'processing_time',
|
||||
'threads',
|
||||
'jvm',
|
||||
'jvm_eden',
|
||||
'jvm_survivor',
|
||||
'jvm_tenured',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'accesses': {
|
||||
|
@ -24,7 +30,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'bandwidth': {
|
||||
'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'],
|
||||
'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
|
||||
'lines': [
|
||||
['bytesSent', 'sent', 'incremental', 1, 1024],
|
||||
['bytesReceived', 'received', 'incremental', 1, 1024],
|
||||
|
@ -44,39 +50,39 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'jvm': {
|
||||
'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'],
|
||||
'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
|
||||
'lines': [
|
||||
['free', 'free', 'absolute', 1, 1048576],
|
||||
['eden_used', 'eden', 'absolute', 1, 1048576],
|
||||
['survivor_used', 'survivor', 'absolute', 1, 1048576],
|
||||
['tenured_used', 'tenured', 'absolute', 1, 1048576],
|
||||
['code_cache_used', 'code cache', 'absolute', 1, 1048576],
|
||||
['compressed_used', 'compressed', 'absolute', 1, 1048576],
|
||||
['metaspace_used', 'metaspace', 'absolute', 1, 1048576],
|
||||
['free', 'free', 'absolute', 1, MiB],
|
||||
['eden_used', 'eden', 'absolute', 1, MiB],
|
||||
['survivor_used', 'survivor', 'absolute', 1, MiB],
|
||||
['tenured_used', 'tenured', 'absolute', 1, MiB],
|
||||
['code_cache_used', 'code cache', 'absolute', 1, MiB],
|
||||
['compressed_used', 'compressed', 'absolute', 1, MiB],
|
||||
['metaspace_used', 'metaspace', 'absolute', 1, MiB],
|
||||
]
|
||||
},
|
||||
'jvm_eden': {
|
||||
'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'],
|
||||
'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
|
||||
'lines': [
|
||||
['eden_used', 'used', 'absolute', 1, 1048576],
|
||||
['eden_committed', 'committed', 'absolute', 1, 1048576],
|
||||
['eden_max', 'max', 'absolute', 1, 1048576]
|
||||
['eden_used', 'used', 'absolute', 1, MiB],
|
||||
['eden_committed', 'committed', 'absolute', 1, MiB],
|
||||
['eden_max', 'max', 'absolute', 1, MiB]
|
||||
]
|
||||
},
|
||||
'jvm_survivor': {
|
||||
'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'],
|
||||
'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
|
||||
'lines': [
|
||||
['survivor_used', 'used', 'absolute', 1, 1048576],
|
||||
['survivor_committed', 'committed', 'absolute', 1, 1048576],
|
||||
['survivor_max', 'max', 'absolute', 1, 1048576]
|
||||
['survivor_used', 'used', 'absolute', 1, MiB],
|
||||
['survivor_committed', 'committed', 'absolute', 1, MiB],
|
||||
['survivor_max', 'max', 'absolute', 1, MiB],
|
||||
]
|
||||
},
|
||||
'jvm_tenured': {
|
||||
'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'],
|
||||
'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
|
||||
'lines': [
|
||||
['tenured_used', 'used', 'absolute', 1, 1048576],
|
||||
['tenured_committed', 'committed', 'absolute', 1, 1048576],
|
||||
['tenured_max', 'max', 'absolute', 1, 1048576]
|
||||
['tenured_used', 'used', 'absolute', 1, MiB],
|
||||
['tenured_committed', 'committed', 'absolute', 1, MiB],
|
||||
['tenured_max', 'max', 'absolute', 1, MiB]
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -85,10 +91,10 @@ CHARTS = {
|
|||
class Service(UrlService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
UrlService.__init__(self, configuration=configuration, name=name)
|
||||
self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
|
||||
self.connector_name = self.configuration.get('connector_name', None)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
|
||||
self.connector_name = self.configuration.get('connector_name', None)
|
||||
|
||||
def _get_data(self):
|
||||
"""
|
||||
|
|
|
@ -24,7 +24,7 @@ ORDER = [
|
|||
|
||||
CHARTS = {
|
||||
'traffic': {
|
||||
'options': [None, 'Tor Traffic', 'KB/s', 'traffic', 'tor.traffic', 'area'],
|
||||
'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
|
||||
'lines': [
|
||||
['read', 'read', 'incremental', 1, 1024],
|
||||
['write', 'write', 'incremental', 1, -1024],
|
||||
|
@ -39,10 +39,8 @@ class Service(SimpleService):
|
|||
super(Service, self).__init__(configuration=configuration, name=name)
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
|
||||
self.port = self.configuration.get('control_port', DEF_PORT)
|
||||
self.password = self.configuration.get('password')
|
||||
|
||||
self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
|
||||
self.conn = None
|
||||
self.alive = False
|
||||
|
|
|
@ -3,15 +3,13 @@
|
|||
# Author: Alexandre Menezes (@ale_menezes)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from json import loads
|
||||
from collections import defaultdict
|
||||
|
||||
from json import loads
|
||||
|
||||
from bases.FrameworkServices.UrlService import UrlService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
update_every = 1
|
||||
priority = 60000
|
||||
|
||||
# charts order (can be overridden if you want less charts, or different order)
|
||||
ORDER = [
|
||||
'response_statuses',
|
||||
'response_codes',
|
||||
|
@ -98,14 +96,22 @@ class Service(UrlService):
|
|||
self.url = self.configuration.get('url', 'http://localhost:8080/health')
|
||||
self.order = ORDER
|
||||
self.definitions = CHARTS
|
||||
self.data = {
|
||||
'successful_requests': 0, 'redirects': 0, 'bad_requests': 0,
|
||||
'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0,
|
||||
'3xx': 0, '4xx': 0, '5xx': 0, 'other': 0,
|
||||
'average_response_time_per_iteration_sec': 0
|
||||
}
|
||||
self.last_total_response_time = 0
|
||||
self.last_total_count = 0
|
||||
self.data = {
|
||||
'successful_requests': 0,
|
||||
'redirects': 0,
|
||||
'bad_requests': 0,
|
||||
'server_errors': 0,
|
||||
'other_requests': 0,
|
||||
'1xx': 0,
|
||||
'2xx': 0,
|
||||
'3xx': 0,
|
||||
'4xx': 0,
|
||||
'5xx': 0,
|
||||
'other': 0,
|
||||
'average_response_time_per_iteration_sec': 0,
|
||||
}
|
||||
|
||||
def _get_data(self):
|
||||
data = self._get_raw_data()
|
||||
|
|
|
@ -13,7 +13,11 @@ from bases.loaders import YamlOrderedLoader
|
|||
|
||||
PRECISION = 1000
|
||||
|
||||
ORDER = ['queries', 'recursion', 'reqlist']
|
||||
ORDER = [
|
||||
'queries',
|
||||
'recursion',
|
||||
'reqlist',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'queries': {
|
||||
|
|
|
@ -7,9 +7,6 @@ import json
|
|||
from copy import deepcopy
|
||||
from bases.FrameworkServices.SocketService import SocketService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
ORDER = [
|
||||
'requests',
|
||||
|
@ -39,27 +36,27 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'tx': {
|
||||
'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'],
|
||||
'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
|
||||
'lines': [
|
||||
['tx', 'tx', 'incremental']
|
||||
]
|
||||
},
|
||||
'avg_rt': {
|
||||
'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'],
|
||||
'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
|
||||
'lines': [
|
||||
['avg_rt', 'avg_rt', 'absolute']
|
||||
]
|
||||
},
|
||||
'memory_rss': {
|
||||
'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'],
|
||||
'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
|
||||
'lines': [
|
||||
['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024]
|
||||
['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
|
||||
]
|
||||
},
|
||||
'memory_vsz': {
|
||||
'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
|
||||
'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
|
||||
'lines': [
|
||||
['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024]
|
||||
['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
|
||||
]
|
||||
},
|
||||
'exceptions': {
|
||||
|
@ -86,15 +83,13 @@ CHARTS = {
|
|||
class Service(SocketService):
|
||||
def __init__(self, configuration=None, name=None):
|
||||
super(Service, self).__init__(configuration=configuration, name=name)
|
||||
self.url = self.configuration.get('host', 'localhost')
|
||||
self.port = self.configuration.get('port', 1717)
|
||||
self.order = ORDER
|
||||
self.definitions = deepcopy(CHARTS)
|
||||
|
||||
self.url = self.configuration.get('host', 'localhost')
|
||||
self.port = self.configuration.get('port', 1717)
|
||||
# Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
|
||||
for chart in DYNAMIC_CHARTS:
|
||||
self.definitions[chart]['lines'] = []
|
||||
|
||||
self.last_result = {}
|
||||
self.workers = []
|
||||
|
||||
|
|
|
@ -8,9 +8,6 @@ import re
|
|||
from bases.collection import find_binary
|
||||
from bases.FrameworkServices.ExecutableService import ExecutableService
|
||||
|
||||
# default module values (can be overridden per job in `config`)
|
||||
# update_every = 2
|
||||
priority = 60000
|
||||
|
||||
ORDER = [
|
||||
'session_connections',
|
||||
|
@ -46,7 +43,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'all_time_hit_rate': {
|
||||
'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance',
|
||||
'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
|
||||
'varnish.all_time_hit_rate', 'stacked'],
|
||||
'lines': [
|
||||
['cache_hit', 'hit', 'percentage-of-absolute-row'],
|
||||
|
@ -54,7 +51,7 @@ CHARTS = {
|
|||
['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
|
||||
},
|
||||
'current_poll_hit_rate': {
|
||||
'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance',
|
||||
'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
|
||||
'varnish.current_poll_hit_rate', 'stacked'],
|
||||
'lines': [
|
||||
['cache_hit', 'hit', 'percentage-of-incremental-row'],
|
||||
|
@ -126,7 +123,7 @@ CHARTS = {
|
|||
]
|
||||
},
|
||||
'memory_usage': {
|
||||
'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'],
|
||||
'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
|
||||
'lines': [
|
||||
['memory_free', 'free', 'absolute', 1, 1 << 20],
|
||||
['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
|
||||
|
|
|
@ -16,7 +16,9 @@ W1_DIR = '/sys/bus/w1/devices/'
|
|||
# Lines matching the following regular expression contain a temperature value
|
||||
RE_TEMP = re.compile(r' t=(\d+)')
|
||||
|
||||
ORDER = ['temp']
|
||||
ORDER = [
|
||||
'temp',
|
||||
]
|
||||
|
||||
CHARTS = {
|
||||
'temp': {
|
||||
|
|
|
@ -25,7 +25,9 @@ from bases.collection import read_last_line
|
|||
from bases.FrameworkServices.LogService import LogService
|
||||
|
||||
|
||||
ORDER_APACHE_CACHE = ['apache_cache']
|
||||
ORDER_APACHE_CACHE = [
|
||||
'apache_cache',
|
||||
]
|
||||
|
||||
ORDER_WEB = [
|
||||
'response_statuses',
|
||||
|
@ -182,7 +184,7 @@ CHARTS_WEB = {
|
|||
|
||||
CHARTS_APACHE_CACHE = {
|
||||
'apache_cache': {
|
||||
'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache',
|
||||
'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
|
||||
'stacked'],
|
||||
'lines': [
|
||||
['hit', 'cache', 'percentage-of-absolute-row'],
|
||||
|
|
Loading…
Add table
Reference in a new issue