diff --git a/.codacy.yml b/.codacy.yml index 0e3f443650..87a5b30058 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -1,15 +1,15 @@ --- exclude_paths: - - python.d/python_modules/pyyaml2/** - - python.d/python_modules/pyyaml3/** - - python.d/python_modules/urllib3/** - - python.d/python_modules/lm_sensors.py + - collectors/python.d.plugin/python_modules/pyyaml2/** + - collectors/python.d.plugin/python_modules/pyyaml3/** + - collectors/python.d.plugin/python_modules/urllib3/** + - collectors/python.d.plugin/python_modules/lm_sensors.py - web/css/** - web/lib/** - web/old/** - - node.d/node_modules/lib/** - - node.d/node_modules/asn1-ber.js - - node.d/node_modules/net-snmp.js - - node.d/node_modules/pixl-xml.js - - node.d/node_modules/extend.js + - collectors/node.d.plugin/node_modules/lib/** + - collectors/node.d.plugin/node_modules/asn1-ber.js + - collectors/node.d.plugin/node_modules/net-snmp.js + - collectors/node.d.plugin/node_modules/pixl-xml.js + - collectors/node.d.plugin/node_modules/extend.js - tests/** diff --git a/.codeclimate.yml b/.codeclimate.yml index 8fa0e2c2ec..8a11c84a6c 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -81,7 +81,6 @@ plugins: enabled: false exclude_patterns: - ".gitignore" - - "conf.d/" - ".githooks/" - "tests/" - "m4/" @@ -89,12 +88,12 @@ exclude_patterns: - "web/lib/" - "web/fonts/" - "web/old/" - - "python.d/python_modules/pyyaml2/" - - "python.d/python_modules/pyyaml3/" - - "python.d/python_modules/urllib3/" - - "node.d/node_modules/lib/" - - "node.d/node_modules/asn1-ber.js" - - "node.d/node_modules/extend.js" - - "node.d/node_modules/pixl-xml.js" - - "node.d/node_modules/net-snmp.js" + - "collectors/python.d.plugin/python_modules/pyyaml2/" + - "collectors/python.d.plugin/python_modules/pyyaml3/" + - "collectors/python.d.plugin/python_modules/urllib3/" + - "collectors/node.d.plugin/node_modules/lib/" + - "collectors/node.d.plugin/node_modules/asn1-ber.js" + - "collectors/node.d.plugin/node_modules/extend.js" + - 
"collectors/node.d.plugin/node_modules/pixl-xml.js" + - "collectors/node.d.plugin/node_modules/net-snmp.js" diff --git a/.gitignore b/.gitignore index 16b5f59bf1..2011a7ad5d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ .deps .libs .dirstamp +.project +.pydevproject *.o *.a @@ -62,15 +64,15 @@ netdata-coverity-analysis.tgz .settings/ README TODO.md -conf.d/netdata.conf -src/TODO.txt +netdata.conf +TODO.txt -web/chart-info/ -web/control.html -web/datasource.css -web/gadget.xml -web/index_new.html -web/version.txt +web/gui/chart-info/ +web/gui/control.html +web/gui/datasource.css +web/gui/gadget.xml +web/gui/index_new.html +web/gui/version.txt # related to karma/javascript/node /node_modules/ @@ -83,15 +85,15 @@ system/netdata.logrotate system/netdata.service system/netdata.plist system/netdata-freebsd +system/edit-config -conf.d/edit-config -plugins.d/alarm-notify.sh -src/plugins/linux-cgroups.plugin/cgroup-name.sh -plugins.d/charts.d.plugin -plugins.d/fping.plugin -plugins.d/node.d.plugin -plugins.d/python.d.plugin -plugins.d/tc-qos-helper.sh +health/alarm-notify.sh +collectors/cgroups.plugin/cgroup-name.sh +collectors/tc.plugin/tc-qos-helper.sh +collectors/charts.d.plugin/charts.d.plugin +collectors/node.d.plugin/node.d.plugin +collectors/python.d.plugin/python.d.plugin +collectors/fping.plugin/fping.plugin # installer generated files netdata-uninstaller.sh @@ -117,7 +119,9 @@ diagrams/*.atxt diagrams/plantuml.jar # cppcheck -src/cppcheck-build/ +cppcheck-build/ + +venv/ # debugging / profiling makeself/debug/ diff --git a/.lgtm.yml b/.lgtm.yml index eb062d50fd..0815aadb53 100644 --- a/.lgtm.yml +++ b/.lgtm.yml @@ -8,15 +8,15 @@ # https://lgtm.com/help/lgtm/lgtm.yml-configuration-file path_classifiers: library: - - python.d/python_modules/third_party/ - - python.d/python_modules/urllib3/ - - python.d/python_modules/pyyaml2/ - - python.d/python_modules/pyyaml3/ - - node.d/node_modules/lib/ - - node.d/node_modules/asn1-ber.js - - 
node.d/node_modules/extend.js - - node.d/node_modules/net-snmp.js - - node.d/node_modules/pixl-xml.js + - collectors/python.d.plugin/python_modules/third_party/ + - collectors/python.d.plugin/python_modules/urllib3/ + - collectors/python.d.plugin/python_modules/pyyaml2/ + - collectors/python.d.plugin/python_modules/pyyaml3/ + - collectors/node.d.plugin/node_modules/lib/ + - collectors/node.d.plugin/node_modules/asn1-ber.js + - collectors/node.d.plugin/node_modules/extend.js + - collectors/node.d.plugin/node_modules/net-snmp.js + - collectors/node.d.plugin/node_modules/pixl-xml.js - web/lib/ - web/css/ test: diff --git a/CMakeLists.txt b/CMakeLists.txt index cee6c57b53..64ceb08c97 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -139,250 +139,254 @@ ENDIF(LINUX) # netdata files set(LIBNETDATA_FILES - src/libnetdata/adaptive_resortable_list.c - src/libnetdata/adaptive_resortable_list.h - src/libnetdata/appconfig.c - src/libnetdata/appconfig.h - src/libnetdata/avl.c - src/libnetdata/avl.h - src/libnetdata/clocks.c - src/libnetdata/clocks.h - src/libnetdata/common.c - src/libnetdata/dictionary.c - src/libnetdata/dictionary.h - src/libnetdata/eval.c - src/libnetdata/eval.h - src/libnetdata/inlined.h - src/libnetdata/libnetdata.h - src/libnetdata/locks.c - src/libnetdata/locks.h - src/libnetdata/log.c - src/libnetdata/log.h - src/libnetdata/os.c - src/libnetdata/os.h - src/libnetdata/popen.c - src/libnetdata/popen.h - src/libnetdata/procfile.c - src/libnetdata/procfile.h - src/libnetdata/simple_pattern.c - src/libnetdata/simple_pattern.h - src/libnetdata/socket.c - src/libnetdata/socket.h - src/libnetdata/statistical.c - src/libnetdata/statistical.h - src/libnetdata/storage_number.c - src/libnetdata/storage_number.h - src/libnetdata/threads.c - src/libnetdata/threads.h - src/libnetdata/web_buffer.c - src/libnetdata/web_buffer.h - src/libnetdata/url.c - src/libnetdata/url.h + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c + 
libnetdata/adaptive_resortable_list/adaptive_resortable_list.h + libnetdata/config/appconfig.c + libnetdata/config/appconfig.h + libnetdata/avl/avl.c + libnetdata/avl/avl.h + libnetdata/buffer/buffer.c + libnetdata/buffer/buffer.h + libnetdata/clocks/clocks.c + libnetdata/clocks/clocks.h + libnetdata/dictionary/dictionary.c + libnetdata/dictionary/dictionary.h + libnetdata/eval/eval.c + libnetdata/eval/eval.h + libnetdata/inlined.h + libnetdata/libnetdata.c + libnetdata/libnetdata.h + libnetdata/locks/locks.c + libnetdata/locks/locks.h + libnetdata/log/log.c + libnetdata/log/log.h + libnetdata/os.c + libnetdata/os.h + libnetdata/popen/popen.c + libnetdata/popen/popen.h + libnetdata/procfile/procfile.c + libnetdata/procfile/procfile.h + libnetdata/simple_pattern/simple_pattern.c + libnetdata/simple_pattern/simple_pattern.h + libnetdata/socket/socket.c + libnetdata/socket/socket.h + libnetdata/statistical/statistical.c + libnetdata/statistical/statistical.h + libnetdata/storage_number/storage_number.c + libnetdata/storage_number/storage_number.h + libnetdata/threads/threads.c + libnetdata/threads/threads.h + libnetdata/url/url.c + libnetdata/url/url.h ) add_library(libnetdata OBJECT ${LIBNETDATA_FILES}) set(APPS_PLUGIN_FILES - src/plugins/apps.plugin/apps_plugin.c + collectors/apps.plugin/apps_plugin.c ) set(CHECKS_PLUGIN_FILES - src/plugins/checks.plugin/plugin_checks.c - src/plugins/checks.plugin/plugin_checks.h + collectors/checks.plugin/plugin_checks.c + collectors/checks.plugin/plugin_checks.h ) set(FREEBSD_PLUGIN_FILES - src/plugins/freebsd.plugin/plugin_freebsd.c - src/plugins/freebsd.plugin/plugin_freebsd.h - src/plugins/freebsd.plugin/freebsd_sysctl.c - src/plugins/freebsd.plugin/freebsd_getmntinfo.c - src/plugins/freebsd.plugin/freebsd_getifaddrs.c - src/plugins/freebsd.plugin/freebsd_devstat.c - src/plugins/freebsd.plugin/freebsd_kstat_zfs.c - src/plugins/freebsd.plugin/freebsd_ipfw.c - src/plugins/linux-proc.plugin/zfs_common.c - 
src/plugins/linux-proc.plugin/zfs_common.h + collectors/freebsd.plugin/plugin_freebsd.c + collectors/freebsd.plugin/plugin_freebsd.h + collectors/freebsd.plugin/freebsd_sysctl.c + collectors/freebsd.plugin/freebsd_getmntinfo.c + collectors/freebsd.plugin/freebsd_getifaddrs.c + collectors/freebsd.plugin/freebsd_devstat.c + collectors/freebsd.plugin/freebsd_kstat_zfs.c + collectors/freebsd.plugin/freebsd_ipfw.c + collectors/proc.plugin/zfs_common.c + collectors/proc.plugin/zfs_common.h ) set(HEALTH_PLUGIN_FILES - src/health/health.c - src/health/health.h - src/health/health_config.c - src/health/health_json.c - src/health/health_log.c + health/health.c + health/health.h + health/health_config.c + health/health_json.c + health/health_log.c ) set(IDLEJITTER_PLUGIN_FILES - src/plugins/idlejitter.plugin/plugin_idlejitter.c - src/plugins/idlejitter.plugin/plugin_idlejitter.h + collectors/idlejitter.plugin/plugin_idlejitter.c + collectors/idlejitter.plugin/plugin_idlejitter.h ) set(CGROUPS_PLUGIN_FILES - src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c - src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h + collectors/cgroups.plugin/sys_fs_cgroup.c + collectors/cgroups.plugin/sys_fs_cgroup.h ) set(CGROUP_NETWORK_FILES - src/plugins/linux-cgroups.plugin/cgroup-network.c + collectors/cgroups.plugin/cgroup-network.c ) set(DISKSPACE_PLUGIN_FILES - src/plugins/linux-diskspace.plugin/plugin_diskspace.h - src/plugins/linux-diskspace.plugin/plugin_diskspace.c + collectors/diskspace.plugin/plugin_diskspace.h + collectors/diskspace.plugin/plugin_diskspace.c ) set(FREEIPMI_PLUGIN_FILES - src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c + collectors/freeipmi.plugin/freeipmi_plugin.c ) set(NFACCT_PLUGIN_FILES - src/plugins/linux-nfacct.plugin/plugin_nfacct.c - src/plugins/linux-nfacct.plugin/plugin_nfacct.h + collectors/nfacct.plugin/plugin_nfacct.c + collectors/nfacct.plugin/plugin_nfacct.h ) set(PROC_PLUGIN_FILES - src/plugins/linux-proc.plugin/ipc.c - 
src/plugins/linux-proc.plugin/plugin_proc.c - src/plugins/linux-proc.plugin/plugin_proc.h - src/plugins/linux-proc.plugin/proc_diskstats.c - src/plugins/linux-proc.plugin/proc_interrupts.c - src/plugins/linux-proc.plugin/proc_softirqs.c - src/plugins/linux-proc.plugin/proc_loadavg.c - src/plugins/linux-proc.plugin/proc_meminfo.c - src/plugins/linux-proc.plugin/proc_net_dev.c - src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c - src/plugins/linux-proc.plugin/proc_net_netstat.c - src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c - src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c - src/plugins/linux-proc.plugin/proc_net_snmp.c - src/plugins/linux-proc.plugin/proc_net_snmp6.c - src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c - src/plugins/linux-proc.plugin/proc_net_sockstat.c - src/plugins/linux-proc.plugin/proc_net_sockstat6.c - src/plugins/linux-proc.plugin/proc_net_softnet_stat.c - src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c - src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c - src/plugins/linux-proc.plugin/proc_self_mountinfo.c - src/plugins/linux-proc.plugin/proc_self_mountinfo.h - src/plugins/linux-proc.plugin/zfs_common.c - src/plugins/linux-proc.plugin/zfs_common.h - src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c - src/plugins/linux-proc.plugin/proc_stat.c - src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c - src/plugins/linux-proc.plugin/proc_vmstat.c - src/plugins/linux-proc.plugin/proc_uptime.c - src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c - src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c - src/plugins/linux-proc.plugin/sys_devices_system_node.c - src/plugins/linux-proc.plugin/sys_fs_btrfs.c + collectors/proc.plugin/ipc.c + collectors/proc.plugin/plugin_proc.c + collectors/proc.plugin/plugin_proc.h + collectors/proc.plugin/proc_diskstats.c + collectors/proc.plugin/proc_interrupts.c + collectors/proc.plugin/proc_softirqs.c + collectors/proc.plugin/proc_loadavg.c + 
collectors/proc.plugin/proc_meminfo.c + collectors/proc.plugin/proc_net_dev.c + collectors/proc.plugin/proc_net_ip_vs_stats.c + collectors/proc.plugin/proc_net_netstat.c + collectors/proc.plugin/proc_net_rpc_nfs.c + collectors/proc.plugin/proc_net_rpc_nfsd.c + collectors/proc.plugin/proc_net_snmp.c + collectors/proc.plugin/proc_net_snmp6.c + collectors/proc.plugin/proc_net_sctp_snmp.c + collectors/proc.plugin/proc_net_sockstat.c + collectors/proc.plugin/proc_net_sockstat6.c + collectors/proc.plugin/proc_net_softnet_stat.c + collectors/proc.plugin/proc_net_stat_conntrack.c + collectors/proc.plugin/proc_net_stat_synproxy.c + collectors/proc.plugin/proc_self_mountinfo.c + collectors/proc.plugin/proc_self_mountinfo.h + collectors/proc.plugin/zfs_common.c + collectors/proc.plugin/zfs_common.h + collectors/proc.plugin/proc_spl_kstat_zfs.c + collectors/proc.plugin/proc_stat.c + collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c + collectors/proc.plugin/proc_vmstat.c + collectors/proc.plugin/proc_uptime.c + collectors/proc.plugin/sys_kernel_mm_ksm.c + collectors/proc.plugin/sys_devices_system_edac_mc.c + collectors/proc.plugin/sys_devices_system_node.c + collectors/proc.plugin/sys_fs_btrfs.c ) set(TC_PLUGIN_FILES - src/plugins/linux-tc.plugin/plugin_tc.c - src/plugins/linux-tc.plugin/plugin_tc.h + collectors/tc.plugin/plugin_tc.c + collectors/tc.plugin/plugin_tc.h ) set(MACOS_PLUGIN_FILES - src/plugins/macos.plugin/plugin_macos.c - src/plugins/macos.plugin/plugin_macos.h - src/plugins/macos.plugin/macos_sysctl.c - src/plugins/macos.plugin/macos_mach_smi.c - src/plugins/macos.plugin/macos_fw.c + collectors/macos.plugin/plugin_macos.c + collectors/macos.plugin/plugin_macos.h + collectors/macos.plugin/macos_sysctl.c + collectors/macos.plugin/macos_mach_smi.c + collectors/macos.plugin/macos_fw.c ) set(PLUGINSD_PLUGIN_FILES - src/plugins/plugins.d.plugin/plugins_d.c - src/plugins/plugins.d.plugin/plugins_d.h + collectors/plugins.d/plugins_d.c + 
collectors/plugins.d/plugins_d.h ) set(REGISTRY_PLUGIN_FILES - src/registry/registry.c - src/registry/registry.h - src/registry/registry_db.c - src/registry/registry_init.c - src/registry/registry_internals.c - src/registry/registry_internals.h - src/registry/registry_log.c - src/registry/registry_machine.c - src/registry/registry_machine.h - src/registry/registry_person.c - src/registry/registry_person.h - src/registry/registry_url.c - src/registry/registry_url.h + registry/registry.c + registry/registry.h + registry/registry_db.c + registry/registry_init.c + registry/registry_internals.c + registry/registry_internals.h + registry/registry_log.c + registry/registry_machine.c + registry/registry_machine.h + registry/registry_person.c + registry/registry_person.h + registry/registry_url.c + registry/registry_url.h ) set(STATSD_PLUGIN_FILES - src/plugins/statsd.plugin/statsd.c - src/plugins/statsd.plugin/statsd.h + collectors/statsd.plugin/statsd.c + collectors/statsd.plugin/statsd.h ) set(RRD_PLUGIN_FILES - src/database/rrdcalc.c - src/database/rrdcalc.h - src/database/rrdcalctemplate.c - src/database/rrdcalctemplate.h - src/database/rrddim.c - src/database/rrddimvar.c - src/database/rrddimvar.h - src/database/rrdfamily.c - src/database/rrdhost.c - src/database/rrd.c - src/database/rrd.h - src/database/rrdset.c - src/database/rrdsetvar.c - src/database/rrdsetvar.h - src/database/rrdvar.c - src/database/rrdvar.h + database/rrdcalc.c + database/rrdcalc.h + database/rrdcalctemplate.c + database/rrdcalctemplate.h + database/rrddim.c + database/rrddimvar.c + database/rrddimvar.h + database/rrdfamily.c + database/rrdhost.c + database/rrd.c + database/rrd.h + database/rrdset.c + database/rrdsetvar.c + database/rrdsetvar.h + database/rrdvar.c + database/rrdvar.h ) set(WEB_PLUGIN_FILES - src/webserver/web_client.c - src/webserver/web_client.h - src/webserver/web_server.c - src/webserver/web_server.h - ) + web/server/web_client.c + web/server/web_client.h + 
web/server/web_server.c + web/server/web_server.h + web/server/single/single-threaded.c web/server/single/single-threaded.h web/server/multi/multi-threaded.c web/server/multi/multi-threaded.h web/server/static/static-threaded.c web/server/static/static-threaded.h web/server/web_client_cache.c web/server/web_client_cache.h) set(API_PLUGIN_FILES - src/api/rrd2json.c - src/api/rrd2json.h - src/api/web_api_v1.c - src/api/web_api_v1.h - src/api/web_buffer_svg.c - src/api/web_buffer_svg.h + web/api/rrd2json.c + web/api/rrd2json.h + web/api/web_api_v1.c + web/api/web_api_v1.h + web/api/web_buffer_svg.c + web/api/web_buffer_svg.h ) set(STREAMING_PLUGIN_FILES - src/streaming/rrdpush.c - src/streaming/rrdpush.h + streaming/rrdpush.c + streaming/rrdpush.h ) set(BACKENDS_PLUGIN_FILES - src/backends/backends.c - src/backends/backends.h - src/backends/graphite/graphite.c - src/backends/graphite/graphite.h - src/backends/json/json.c - src/backends/json/json.h - src/backends/opentsdb/opentsdb.c - src/backends/opentsdb/opentsdb.h - src/backends/prometheus/backend_prometheus.c - src/backends/prometheus/backend_prometheus.h + backends/backends.c + backends/backends.h + backends/graphite/graphite.c + backends/graphite/graphite.h + backends/json/json.c + backends/json/json.h + backends/opentsdb/opentsdb.c + backends/opentsdb/opentsdb.h + backends/prometheus/backend_prometheus.c + backends/prometheus/backend_prometheus.h + ) + +set(DAEMON_FILES + daemon/common.c + daemon/common.h + daemon/daemon.c + daemon/daemon.h + daemon/global_statistics.c + daemon/global_statistics.h + daemon/main.c + daemon/main.h + daemon/signals.c + daemon/signals.h + daemon/unit_test.c + daemon/unit_test.h ) set(NETDATA_FILES - src/plugins/all.h - src/common.c - src/common.h - src/daemon.c - src/daemon.h - src/global_statistics.c - src/global_statistics.h - src/main.c - src/main.h - src/signals.c - src/signals.h - src/unit_test.c - src/unit_test.h + collectors/all.h + ${DAEMON_FILES} ${API_PLUGIN_FILES} 
${BACKENDS_PLUGIN_FILES} ${CHECKS_PLUGIN_FILES} diff --git a/Makefile.am b/Makefile.am index 3bac6a6262..c80aa0f5a0 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,8 +1,6 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> # SPDX-License-Identifier: GPL-3.0-or-later -# -AUTOMAKE_OPTIONS=foreign 1.10 + +AUTOMAKE_OPTIONS=foreign subdir-objects 1.10 ACLOCAL_AMFLAGS = -I build/m4 MAINTAINERCLEANFILES= \ @@ -47,16 +45,9 @@ EXTRA_DIST = \ $(NULL) SUBDIRS = \ - charts.d \ - conf.d \ diagrams \ makeself \ - node.d \ - plugins.d \ - python.d \ - src \ system \ - web \ contrib \ tests \ $(NULL) @@ -79,3 +70,369 @@ dist_noinst_SCRIPTS= \ netdata-installer.sh \ installer/functions.sh \ $(NULL) + +# ----------------------------------------------------------------------------- +# Compile netdata binaries + +SUBDIRS += \ + backends \ + collectors \ + database \ + health \ + libnetdata \ + registry \ + streaming \ + web \ + $(NULL) + + +AM_CFLAGS = \ + $(OPTIONAL_MATH_CFLAGS) \ + $(OPTIONAL_NFACCT_CLFAGS) \ + $(OPTIONAL_ZLIB_CFLAGS) \ + $(OPTIONAL_UUID_CFLAGS) \ + $(OPTIONAL_LIBCAP_LIBS) \ + $(OPTIONAL_IPMIMONITORING_CFLAGS) \ + $(NULL) + +sbin_PROGRAMS = +dist_cache_DATA = installer/.keep +dist_varlib_DATA = installer/.keep +dist_registry_DATA = installer/.keep +dist_log_DATA = installer/.keep +plugins_PROGRAMS = + +LIBNETDATA_FILES = \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \ + libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \ + libnetdata/config/appconfig.c \ + libnetdata/config/appconfig.h \ + libnetdata/avl/avl.c \ + libnetdata/avl/avl.h \ + libnetdata/buffer/buffer.c \ + libnetdata/buffer/buffer.h \ + libnetdata/clocks/clocks.c \ + libnetdata/clocks/clocks.h \ + libnetdata/dictionary/dictionary.c \ + libnetdata/dictionary/dictionary.h \ + libnetdata/eval/eval.c \ + libnetdata/eval/eval.h \ + libnetdata/inlined.h \ + libnetdata/libnetdata.c \ + libnetdata/libnetdata.h \ + libnetdata/locks/locks.c \ + 
libnetdata/locks/locks.h \ + libnetdata/log/log.c \ + libnetdata/log/log.h \ + libnetdata/popen/popen.c \ + libnetdata/popen/popen.h \ + libnetdata/procfile/procfile.c \ + libnetdata/procfile/procfile.h \ + libnetdata/os.c \ + libnetdata/os.h \ + libnetdata/simple_pattern/simple_pattern.c \ + libnetdata/simple_pattern/simple_pattern.h \ + libnetdata/socket/socket.c \ + libnetdata/socket/socket.h \ + libnetdata/statistical/statistical.c \ + libnetdata/statistical/statistical.h \ + libnetdata/storage_number/storage_number.c \ + libnetdata/storage_number/storage_number.h \ + libnetdata/threads/threads.c \ + libnetdata/threads/threads.h \ + libnetdata/url/url.c \ + libnetdata/url/url.h \ + $(NULL) + +APPS_PLUGIN_FILES = \ + collectors/apps.plugin/apps_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +CHECKS_PLUGIN_FILES = \ + collectors/checks.plugin/plugin_checks.c \ + collectors/checks.plugin/plugin_checks.h \ + $(NULL) + +FREEBSD_PLUGIN_FILES = \ + collectors/freebsd.plugin/plugin_freebsd.c \ + collectors/freebsd.plugin/plugin_freebsd.h \ + collectors/freebsd.plugin/freebsd_sysctl.c \ + collectors/freebsd.plugin/freebsd_getmntinfo.c \ + collectors/freebsd.plugin/freebsd_getifaddrs.c \ + collectors/freebsd.plugin/freebsd_devstat.c \ + collectors/freebsd.plugin/freebsd_kstat_zfs.c \ + collectors/freebsd.plugin/freebsd_ipfw.c \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + $(NULL) + +HEALTH_PLUGIN_FILES = \ + health/health.c \ + health/health.h \ + health/health_config.c \ + health/health_json.c \ + health/health_log.c \ + $(NULL) + +IDLEJITTER_PLUGIN_FILES = \ + collectors/idlejitter.plugin/plugin_idlejitter.c \ + collectors/idlejitter.plugin/plugin_idlejitter.h \ + $(NULL) + +CGROUPS_PLUGIN_FILES = \ + collectors/cgroups.plugin/sys_fs_cgroup.c \ + collectors/cgroups.plugin/sys_fs_cgroup.h \ + $(NULL) + +CGROUP_NETWORK_FILES = \ + collectors/cgroups.plugin/cgroup-network.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +DISKSPACE_PLUGIN_FILES 
= \ + collectors/diskspace.plugin/plugin_diskspace.h \ + collectors/diskspace.plugin/plugin_diskspace.c \ + $(NULL) + +FREEIPMI_PLUGIN_FILES = \ + collectors/freeipmi.plugin/freeipmi_plugin.c \ + $(LIBNETDATA_FILES) \ + $(NULL) + +NFACCT_PLUGIN_FILES = \ + collectors/nfacct.plugin/plugin_nfacct.c \ + collectors/nfacct.plugin/plugin_nfacct.h \ + $(NULL) + +PROC_PLUGIN_FILES = \ + collectors/proc.plugin/ipc.c \ + collectors/proc.plugin/plugin_proc.c \ + collectors/proc.plugin/plugin_proc.h \ + collectors/proc.plugin/proc_diskstats.c \ + collectors/proc.plugin/proc_interrupts.c \ + collectors/proc.plugin/proc_softirqs.c \ + collectors/proc.plugin/proc_loadavg.c \ + collectors/proc.plugin/proc_meminfo.c \ + collectors/proc.plugin/proc_net_dev.c \ + collectors/proc.plugin/proc_net_ip_vs_stats.c \ + collectors/proc.plugin/proc_net_netstat.c \ + collectors/proc.plugin/proc_net_rpc_nfs.c \ + collectors/proc.plugin/proc_net_rpc_nfsd.c \ + collectors/proc.plugin/proc_net_snmp.c \ + collectors/proc.plugin/proc_net_snmp6.c \ + collectors/proc.plugin/proc_net_sctp_snmp.c \ + collectors/proc.plugin/proc_net_sockstat.c \ + collectors/proc.plugin/proc_net_sockstat6.c \ + collectors/proc.plugin/proc_net_softnet_stat.c \ + collectors/proc.plugin/proc_net_stat_conntrack.c \ + collectors/proc.plugin/proc_net_stat_synproxy.c \ + collectors/proc.plugin/proc_self_mountinfo.c \ + collectors/proc.plugin/proc_self_mountinfo.h \ + collectors/proc.plugin/zfs_common.c \ + collectors/proc.plugin/zfs_common.h \ + collectors/proc.plugin/proc_spl_kstat_zfs.c \ + collectors/proc.plugin/proc_stat.c \ + collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \ + collectors/proc.plugin/proc_vmstat.c \ + collectors/proc.plugin/proc_uptime.c \ + collectors/proc.plugin/sys_kernel_mm_ksm.c \ + collectors/proc.plugin/sys_devices_system_edac_mc.c \ + collectors/proc.plugin/sys_devices_system_node.c \ + collectors/proc.plugin/sys_fs_btrfs.c \ + $(NULL) + +TC_PLUGIN_FILES = \ + 
collectors/tc.plugin/plugin_tc.c \ + collectors/tc.plugin/plugin_tc.h \ + $(NULL) + +MACOS_PLUGIN_FILES = \ + collectors/macos.plugin/plugin_macos.c \ + collectors/macos.plugin/plugin_macos.h \ + collectors/macos.plugin/macos_sysctl.c \ + collectors/macos.plugin/macos_mach_smi.c \ + collectors/macos.plugin/macos_fw.c \ + $(NULL) + +PLUGINSD_PLUGIN_FILES = \ + collectors/plugins.d/plugins_d.c \ + collectors/plugins.d/plugins_d.h \ + $(NULL) + +RRD_PLUGIN_FILES = \ + database/rrdcalc.c \ + database/rrdcalc.h \ + database/rrdcalctemplate.c \ + database/rrdcalctemplate.h \ + database/rrddim.c \ + database/rrddimvar.c \ + database/rrddimvar.h \ + database/rrdfamily.c \ + database/rrdhost.c \ + database/rrd.c \ + database/rrd.h \ + database/rrdset.c \ + database/rrdsetvar.c \ + database/rrdsetvar.h \ + database/rrdvar.c \ + database/rrdvar.h \ + $(NULL) + +API_PLUGIN_FILES = \ + web/api/rrd2json.c \ + web/api/rrd2json.h \ + web/api/web_api_v1.c \ + web/api/web_api_v1.h \ + web/api/web_buffer_svg.c \ + web/api/web_buffer_svg.h \ + $(NULL) + +STREAMING_PLUGIN_FILES = \ + streaming/rrdpush.c \ + streaming/rrdpush.h \ + $(NULL) + +REGISTRY_PLUGIN_FILES = \ + registry/registry.c \ + registry/registry.h \ + registry/registry_db.c \ + registry/registry_init.c \ + registry/registry_internals.c \ + registry/registry_internals.h \ + registry/registry_log.c \ + registry/registry_machine.c \ + registry/registry_machine.h \ + registry/registry_person.c \ + registry/registry_person.h \ + registry/registry_url.c \ + registry/registry_url.h \ + $(NULL) + +STATSD_PLUGIN_FILES = \ + collectors/statsd.plugin/statsd.c \ + collectors/statsd.plugin/statsd.h \ + $(NULL) + +WEB_PLUGIN_FILES = \ + web/server/web_client.c \ + web/server/web_client.h \ + web/server/web_server.c \ + web/server/web_server.h \ + web/server/web_client_cache.c \ + web/server/web_client_cache.h \ + web/server/single/single-threaded.c \ + web/server/single/single-threaded.h \ + web/server/multi/multi-threaded.c \ + 
web/server/multi/multi-threaded.h \ + web/server/static/static-threaded.c \ + web/server/static/static-threaded.h \ + $(NULL) + +BACKENDS_PLUGIN_FILES = \ + backends/backends.c \ + backends/backends.h \ + backends/graphite/graphite.c \ + backends/graphite/graphite.h \ + backends/json/json.c \ + backends/json/json.h \ + backends/opentsdb/opentsdb.c \ + backends/opentsdb/opentsdb.h \ + backends/prometheus/backend_prometheus.c \ + backends/prometheus/backend_prometheus.h \ + $(NULL) + +DAEMON_FILES = \ + daemon/common.c \ + daemon/common.h \ + daemon/daemon.c \ + daemon/daemon.h \ + daemon/global_statistics.c \ + daemon/global_statistics.h \ + daemon/main.c \ + daemon/main.h \ + daemon/signals.c \ + daemon/signals.h \ + daemon/unit_test.c \ + daemon/unit_test.h \ + $(NULL) + +NETDATA_FILES = \ + collectors/all.h \ + $(DAEMON_FILES) \ + $(LIBNETDATA_FILES) \ + $(API_PLUGIN_FILES) \ + $(BACKENDS_PLUGIN_FILES) \ + $(CHECKS_PLUGIN_FILES) \ + $(HEALTH_PLUGIN_FILES) \ + $(IDLEJITTER_PLUGIN_FILES) \ + $(PLUGINSD_PLUGIN_FILES) \ + $(REGISTRY_PLUGIN_FILES) \ + $(RRD_PLUGIN_FILES) \ + $(STREAMING_PLUGIN_FILES) \ + $(STATSD_PLUGIN_FILES) \ + $(WEB_PLUGIN_FILES) \ + $(NULL) + +if FREEBSD + NETDATA_FILES += \ + $(FREEBSD_PLUGIN_FILES) \ + $(NULL) +endif + +if MACOS + NETDATA_FILES += \ + $(MACOS_PLUGIN_FILES) \ + $(NULL) +endif + +if LINUX + NETDATA_FILES += \ + $(CGROUPS_PLUGIN_FILES) \ + $(DISKSPACE_PLUGIN_FILES) \ + $(NFACCT_PLUGIN_FILES) \ + $(PROC_PLUGIN_FILES) \ + $(TC_PLUGIN_FILES) \ + $(NULL) + +endif + +NETDATA_COMMON_LIBS = \ + $(OPTIONAL_MATH_LIBS) \ + $(OPTIONAL_ZLIB_LIBS) \ + $(OPTIONAL_UUID_LIBS) \ + $(NULL) + + +sbin_PROGRAMS += netdata +netdata_SOURCES = ../config.h $(NETDATA_FILES) +netdata_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_NFACCT_LIBS) \ + $(NULL) + +if ENABLE_PLUGIN_APPS + plugins_PROGRAMS += apps.plugin + apps_plugin_SOURCES = ../config.h $(APPS_PLUGIN_FILES) + apps_plugin_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_LIBCAP_LIBS) \ + 
$(NULL) +endif + +if ENABLE_PLUGIN_CGROUP_NETWORK + plugins_PROGRAMS += cgroup-network + cgroup_network_SOURCES = ../config.h $(CGROUP_NETWORK_FILES) + cgroup_network_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(NULL) +endif + +if ENABLE_PLUGIN_FREEIPMI + plugins_PROGRAMS += freeipmi.plugin + freeipmi_plugin_SOURCES = ../config.h $(FREEIPMI_PLUGIN_FILES) + freeipmi_plugin_LDADD = \ + $(NETDATA_COMMON_LIBS) \ + $(OPTIONAL_IPMIMONITORING_LIBS) \ + $(NULL) +endif diff --git a/src/backends/Makefile.am b/backends/Makefile.am similarity index 53% rename from src/backends/Makefile.am rename to backends/Makefile.am index 4ce2a71294..268259edd7 100644 --- a/src/backends/Makefile.am +++ b/backends/Makefile.am @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in SUBDIRS = \ graphite \ @@ -9,3 +9,11 @@ SUBDIRS = \ opentsdb \ prometheus \ $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_noinst_SCRIPTS = \ + nc-backend.sh \ + $(NULL) diff --git a/backends/README.md b/backends/README.md new file mode 100644 index 0000000000..e514c2b8fe --- /dev/null +++ b/backends/README.md @@ -0,0 +1,137 @@ + +netdata supports backends for archiving the metrics, or providing long term dashboards, using grafana or other tools, like this: + + + +Since netdata collects thousands of metrics per server per second, which would easily congest any backend server when several netdata servers are sending data to it, netdata allows sending metrics at a lower frequency. So, although netdata collects metrics every second, it can send to the backend servers averages or sums every X seconds (though, it can send them per second if you need it to). + +## features + +1. Supported backends + + 1. 
**graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**, **Blueflood**, **ElasticSearch** via logstash tcp input and the graphite codec, etc) + + metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can also be configured). + + 2. **opentsdb** (`telnet interface`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc) + + metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`. + + 3. **json** document DBs + + metrics are sent to a document db, `JSON` formatted. + + 4. **prometheus** is described at [prometheus page](prometheus/) since it pulls data from netdata. + +2. Only one backend may be active at a time. + +3. All metrics are transferred to the backend - netdata does not implement any metric filtering. + +4. Three modes of operation (for all backends): + + 1. `as collected`: the latest collected value is sent to the backend. This means that if netdata is configured to send data to the backend every 10 seconds, only 1 out of 10 values will appear at the backend server. The values are sent exactly as collected, before any multipliers or dividers applied and before any interpolation. This mode emulates other data collectors, such as `collectd`. + + 2. `average`: the average of the interpolated values shown on the netdata graphs is sent to the backend. So, if netdata is configured to send data to the backend server every 10 seconds, the average of the 10 values shown on the netdata charts will be used. **If you can't decide which mode to use, use `average`.** + + 3. `sum` or `volume`: the sum of the interpolated values shown on the netdata graphs is sent to the backend. So, if netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the netdata charts will be used. + +5. 
This code is smart enough not to slow down netdata, regardless of the speed of the backend server. + +## configuration + +In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version of `netdata.conf` from your netdata): + +``` +[backend] + enabled = yes | no + type = graphite | opentsdb | json + host tags = list of TAG=VALUE + destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used + data source = average | sum | as collected + prefix = netdata + hostname = my-name + update every = 10 + buffer on failures = 10 + timeout ms = 20000 + send charts matching = * + send hosts matching = localhost * + send names instead of ids = yes +``` + +- `enabled = yes | no`, enables or disables sending data to a backend + +- `type = graphite | opentsdb | json`, selects the backend type + +- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the **first available** to send the metrics. + + The format of each item in this list, is: `[PROTOCOL:]IP[:PORT]`. + + `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and only supported by the current backends. + + `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you can enclose the IP in `[]` to separate it from the port. + + `PORT` can be a number or a service name. If omitted, the default port for the backend will be used (graphite = 2003, opentsdb = 4242). + + Example IPv4: + +``` + destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242 +``` + + Example IPv6 and IPv4 together: + +``` + destination = [ffff:...:0001]:2003 10.11.12.1:2003 +``` + + When multiple servers are defined, netdata will try the next one when the first one fails. This allows you to load-balance different servers: give your backend servers in different order on each netdata. 
+ + netdata also ships [`nc-backend.sh`](https://github.com/netdata/netdata/blob/master/contrib/nc-backend.sh), a script that can be used as a fallback backend to save the metrics to disk and push them to the time-series database when it becomes available again. It can also be used to monitor / trace / debug the metrics netdata generates. + +- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will be sent to the backend. + +- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default this is `[global].hostname`. + +- `prefix = netdata`, is the prefix to add to all metrics. + +- `update every = 10`, is the number of seconds between sending data to the backend. netdata will add some randomness to this number, to prevent stressing the backend server when many netdata servers send data to the same backend. This randomness does not affect the quality of the data, only the time they are sent. + +- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds) to buffer data, when the backend is not available. If the backend fails to receive the data after that many failures, data loss on the backend is expected (netdata will also log it). + +- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data. By default this is `2 * update_every * 1000`. + +- `send hosts matching = localhost *` includes one or more space separated patterns, using ` * ` as wildcard (any number of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing us to filter which hosts will be sent to the backend when this netdata is a central netdata aggregating multiple hosts. A pattern starting with ` ! ` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*slave*`, use `!*slave* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative). + +- `send charts matching = *` includes one or more space separated patterns, using ` * ` as wildcard (any number of times within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with ` ! ` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used - positive or negative). + +- `send names instead of ids = yes | no` controls the metric names netdata should send to backend. netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc. + +- `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host. These are currently only sent to opentsdb and prometheus. Please use the appropriate format for each time-series db. For example opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus like `tag1="value1",tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics between netdata servers). + +## monitoring operation + +netdata provides 5 charts: + +1. **Buffered metrics**, the number of metrics netdata added to the buffer for dispatching them to the backend server. +2. **Buffered data size**, the amount of data (in KB) netdata added the buffer. +3. ~~**Backend latency**, the time the backend server needed to process the data netdata sent. 
If there was a re-connection involved, this includes the connection time.~~ (this chart has been removed, because it only measures the time netdata needs to give the data to the O/S - since the backend servers do not ack the reception, netdata does not have any means to measure this properly) +4. **Backend operations**, the number of operations performed by netdata. +5. **Backend thread CPU usage**, the CPU resources consumed by the netdata thread, that is responsible for sending the metrics to the backend server. + + + +## alarms + +The latest version of the alarms configuration for monitoring the backend is here: https://github.com/netdata/netdata/blob/master/conf.d/health.d/backend.conf + +netdata adds 4 alarms: + +1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data +2. `backend_metrics_sent`, percentage of metrics sent to the backend server +3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server +4. ~~`backend_slow`, the percentage of time between iterations needed by the backend time to process the data sent by netdata~~ (this was misleading and has been removed). 
+ + + +## InfluxDB setup as netdata backend (example) +You can find blog post with example: how to use InfluxDB with netdata [here](https://blog.hda.me/2017/01/09/using-netdata-with-influxdb-backend.html) diff --git a/src/backends/backends.c b/backends/backends.c similarity index 99% rename from src/backends/backends.c rename to backends/backends.c index 03494d5948..6cb1e1c62a 100644 --- a/src/backends/backends.c +++ b/backends/backends.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "backends.h" // ---------------------------------------------------------------------------- // How backends work in netdata: diff --git a/src/backends/backends.h b/backends/backends.h similarity index 90% rename from src/backends/backends.h rename to backends/backends.h index 63520a86ed..9be4afd87c 100644 --- a/src/backends/backends.h +++ b/backends/backends.h @@ -3,7 +3,7 @@ #ifndef NETDATA_BACKENDS_H #define NETDATA_BACKENDS_H 1 -#include "../common.h" +#include "daemon/common.h" typedef enum backend_options { BACKEND_OPTION_NONE = 0, @@ -42,9 +42,9 @@ extern size_t backend_name_copy(char *d, const char *s, size_t usable); extern int discard_response(BUFFER *b, const char *backend); #endif // BACKENDS_INTERNALS -#include "prometheus/backend_prometheus.h" -#include "graphite/graphite.h" -#include "json/json.h" -#include "opentsdb/opentsdb.h" +#include "backends/prometheus/backend_prometheus.h" +#include "backends/graphite/graphite.h" +#include "backends/json/json.h" +#include "backends/opentsdb/opentsdb.h" #endif /* NETDATA_BACKENDS_H */ diff --git a/src/api/Makefile.am b/backends/graphite/Makefile.am similarity index 63% rename from src/api/Makefile.am rename to backends/graphite/Makefile.am index 8773fd098c..babdcf0df3 100644 --- a/src/api/Makefile.am +++ b/backends/graphite/Makefile.am @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in 
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/src/backends/graphite/graphite.c b/backends/graphite/graphite.c similarity index 100% rename from src/backends/graphite/graphite.c rename to backends/graphite/graphite.c diff --git a/src/backends/graphite/graphite.h b/backends/graphite/graphite.h similarity index 97% rename from src/backends/graphite/graphite.h rename to backends/graphite/graphite.h index 6b7f547c65..b7b0930fa9 100644 --- a/src/backends/graphite/graphite.h +++ b/backends/graphite/graphite.h @@ -4,7 +4,7 @@ #ifndef NETDATA_BACKEND_GRAPHITE_H #define NETDATA_BACKEND_GRAPHITE_H -#include "../backends.h" +#include "backends/backends.h" extern int format_dimension_collected_graphite_plaintext( BUFFER *b // the buffer to write data to diff --git a/src/backends/opentsdb/Makefile.am b/backends/json/Makefile.am similarity index 63% rename from src/backends/opentsdb/Makefile.am rename to backends/json/Makefile.am index 8773fd098c..babdcf0df3 100644 --- a/src/backends/opentsdb/Makefile.am +++ b/backends/json/Makefile.am @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/src/backends/json/json.c b/backends/json/json.c similarity index 100% rename from src/backends/json/json.c rename to backends/json/json.c diff --git a/src/backends/json/json.h b/backends/json/json.h similarity index 97% rename from src/backends/json/json.h rename to backends/json/json.h index 0a4c552423..11015652e2 100644 --- a/src/backends/json/json.h +++ b/backends/json/json.h @@ -3,7 +3,7 @@ #ifndef NETDATA_BACKEND_JSON_H #define NETDATA_BACKEND_JSON_H -#include "../backends.h" +#include "backends/backends.h" extern int format_dimension_collected_json_plaintext( BUFFER *b // the buffer to write data to diff --git a/contrib/nc-backend.sh b/backends/nc-backend.sh similarity index 96% rename from contrib/nc-backend.sh rename to backends/nc-backend.sh 
index 089b21accf..7280f86a06 100755 --- a/contrib/nc-backend.sh +++ b/backends/nc-backend.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash + # SPDX-License-Identifier: GPL-3.0-or-later +# This is a simple backend database proxy, written in BASH, using the nc command. +# Run the script without any parameters for help. + MODE="${1}" MY_PORT="${2}" BACKEND_HOST="${3}" diff --git a/src/backends/graphite/Makefile.am b/backends/opentsdb/Makefile.am similarity index 63% rename from src/backends/graphite/Makefile.am rename to backends/opentsdb/Makefile.am index 8773fd098c..babdcf0df3 100644 --- a/src/backends/graphite/Makefile.am +++ b/backends/opentsdb/Makefile.am @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/src/backends/opentsdb/opentsdb.c b/backends/opentsdb/opentsdb.c similarity index 100% rename from src/backends/opentsdb/opentsdb.c rename to backends/opentsdb/opentsdb.c diff --git a/src/backends/opentsdb/opentsdb.h b/backends/opentsdb/opentsdb.h similarity index 97% rename from src/backends/opentsdb/opentsdb.h rename to backends/opentsdb/opentsdb.h index ea47f7c9a4..fc83b39ca5 100644 --- a/src/backends/opentsdb/opentsdb.h +++ b/backends/opentsdb/opentsdb.h @@ -3,7 +3,7 @@ #ifndef NETDATA_BACKEND_OPENTSDB_H #define NETDATA_BACKEND_OPENTSDB_H -#include "../backends.h" +#include "backends/backends.h" extern int format_dimension_collected_opentsdb_telnet( BUFFER *b // the buffer to write data to diff --git a/backends/prometheus/Makefile.am b/backends/prometheus/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/backends/prometheus/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md new 
file mode 100644 index 0000000000..826cf051bd --- /dev/null +++ b/backends/prometheus/README.md @@ -0,0 +1,376 @@ +> IMPORTANT: the format netdata sends metrics to prometheus has changed since netdata v1.7. The new prometheus backend for netdata supports a lot more features and is aligned to the development of the rest of the netdata backends. + +# Using netdata with Prometheus + +Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Recently netdata added support for Prometheus. I'm going to quickly show you how to install both netdata and prometheus on the same server. We can then use grafana pointed at Prometheus to obtain long term metrics netdata offers. I'm assuming we are starting at a fresh ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you). + +## Installing netdata and prometheus + +### Installing netdata +There are a number of ways to install netdata according to [Installation](https://github.com/netdata/netdata/wiki/Installation) +The suggested way is to install the latest netdata and keep it upgraded automatically, using the one line installation: + +``` +bash <(curl -Ss https://my-netdata.io/kickstart.sh) +``` +At this point we should have netdata listening on port 19999. Point your browser here: + +``` +http://your.netdata.ip:19999 +``` + +*(replace `your.netdata.ip` with the IP or hostname of the server running netdata)* + +### Installing Prometheus +In order to install prometheus we are going to introduce our own systemd startup script along with an example of prometheus.yaml configuration. Prometheus needs to be pointed to your server at a specific target url for it to scrape netdata's api. Prometheus is always a pull model meaning netdata is the passive client within this architecture. Prometheus always initiates the connection with netdata. 
+ +##### Download Prometheus + +```sh +wget -O /tmp/prometheus-2.3.2.linux-amd64.tar.gz https://github.com/prometheus/prometheus/releases/download/v2.3.2/prometheus-2.3.2.linux-amd64.tar.gz +``` + +##### Create prometheus system user + +```sh +sudo useradd -r prometheus +``` + +#### Create prometheus directory + +```sh +sudo mkdir /opt/prometheus +sudo chown prometheus:prometheus /opt/prometheus +``` + +#### Untar prometheus directory + +```sh +sudo tar -xvf /tmp/prometheus-2.3.2.linux-amd64.tar.gz -C /opt/prometheus --strip=1 +``` + +#### Install prometheus.yml + +We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`. + +Make sure to replace `your.netdata.ip` with the IP or hostname of the host running netdata. + +```yaml +# my global config +global: + scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute. + evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'codelab-monitor' + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + # - "first.rules" + # - "second.rules" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. 
+ + static_configs: + - targets: ['0.0.0.0:9090'] + + - job_name: 'netdata-scrape' + + metrics_path: '/api/v1/allmetrics' + params: + # format: prometheus | prometheus_all_hosts + # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP + format: [prometheus] + # + # sources: as-collected | raw | average | sum | volume + # default is: average + #source: [as-collected] + # + # server name for this prometheus - the default is the client IP + # for netdata to uniquely identify it + #server: ['prometheus1'] + honor_labels: true + + static_configs: + - targets: ['{your.netdata.ip}:19999'] +``` +#### Install nodes.yml + +The following is completely optional, it will enable Prometheus to generate alerts from some NetData sources. Tweak the values to your own needs. We will use the following `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a *- "nodes.yml"* entry under the *rule_files:* section in the example prometheus.yml file above. +``` +groups: +- name: nodes + + rules: + - alert: node_high_cpu_usage_70 + expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70 + for: 1m + annotations: + description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.' + summary: CPU alert for container node '{{ $labels.job }}' + + - alert: node_high_memory_usage_70 + expr: 100 / sum(netdata_system_ram_MB_average) by (job) + * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30 + for: 1m + annotations: + description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.' 
+ summary: Memory alert for container node '{{ $labels.job }}' + + - alert: node_low_root_filesystem_space_20 + expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job) + * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20 + for: 1m + annotations: + description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.' + summary: Root filesystem alert for container node '{{ $labels.job }}' + + - alert: node_root_filesystem_fill_rate_6h + expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0 + for: 1h + labels: + severity: critical + annotations: + description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h. + summary: Disk fill alert for Swarm node '{{ $labels.job }}' +``` + +#### Install prometheus.service + +Save this service file as `/etc/systemd/system/prometheus.service`: + +``` +[Unit] +Description=Prometheus Server +AssertPathExists=/opt/prometheus + +[Service] +Type=simple +WorkingDirectory=/opt/prometheus +User=prometheus +Group=prometheus +ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info +ExecReload=/bin/kill -SIGHUP $MAINPID +ExecStop=/bin/kill -SIGINT $MAINPID + +[Install] +WantedBy=multi-user.target +``` + +##### Start Prometheus + +``` +sudo systemctl start prometheus +sudo systemctl enable prometheus +``` + +Prometheus should now start and listen on port 9090. Attempt to head there with your browser. + +If everything is working correctly when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click this and click on 'targets' We should see the netdata host as a scraped target. + +--- + +## netdata support for prometheus + +> IMPORTANT: the format netdata sends metrics to prometheus has changed since netdata v1.6. The new format allows easier queries for metrics and supports both `as collected` and normalized metrics. 
+ +Before explaining the changes, we have to understand the key differences between netdata and prometheus. + +### understanding netdata metrics + +##### charts + +Each chart in netdata has several properties (common to all its metrics): + +- `chart_id` - uniquely identifies a chart. + +- `chart_name` - a more human friendly name for `chart_id`, also unique. + +- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts have the same context, etc. This is used for alarm templates to match all the charts they should be attached to. + +- `family` groups a set of charts together. It is used as the submenu of the dashboard. + +- `units` is the units for all the metrics attached to the chart. + +##### dimensions + +Then each netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of measurement, and are contextually in the same category (ie. the metrics for disk bandwidth are `read` and `write` and they are both in the same chart). + +### netdata data source + +netdata can send metrics to prometheus from 3 data sources: + +- `as collected` or `raw` - this data source sends the metrics to prometheus as they are collected. No conversion is done by netdata. The latest value for each metric is just given to prometheus. This is the most preferred method by prometheus, but it is also the harder to work with. To work with this data source, you will need to understand how to get meaningful values out of them. + + The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. + + If the metric is a counter (`incremental` in netdata lingo), `_total` is appended the context. + + Unlike prometheus, netdata allows each dimension of a chart to have a different algorithm and conversion constants (`multiplier` and `divisor`). 
In this case, that the dimensions of a charts are heterogeneous, netdata will use this format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}` + +- `average` - this data source uses the netdata database to send the metrics to prometheus as they are presented on the netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the netdata dashboard charts. This is the easiest to work with. + + The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. + + When this source is used, netdata keeps track of the last access time for each prometheus server fetching the metrics. This last access time is used at the subsequent queries of the same prometheus server to identify the time-frame the `average` will be calculated. So, no matter how frequently prometheus scrapes netdata, it will get all the database data. To identify each prometheus server, netdata uses by default the IP of the client fetching the metrics. If there are multiple prometheus servers fetching data from the same netdata, using the same IP, each prometheus server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the prometheus server. + +- `sum` or `volume`, is like `average` but instead of averaging the values, it sums them. + + The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. + All the other operations are the same with `average`. + +Keep in mind that early versions of netdata were sending the metrics as: `CHART_DIMENSION{}`. + + +### Querying Metrics + +Fetch with your web browser this URL: + +`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes` + +*(replace `your.netdata.ip` with the ip or hostname of your netdata server)* + +netdata will respond with all the metrics it sends to prometheus. 
+ +If you search that page for `"system.cpu"` you will find all the metrics netdata is exporting to prometheus for this chart. `system.cpu` is the chart name on the netdata dashboard (on the netdata dashboard all charts have a text heading such as : `Total CPU utilization (system.cpu)`. What we are interested here in the chart name: `system.cpu`). + +Searching for `"system.cpu"` reveals: + +```sh +# COMMENT homogeneus chart "system.cpu", context "system.cpu", family "cpu", units "percentage" +# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000 
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000 +# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive +netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000 +``` +*(netdata response for `system.cpu` with source=`average`)* + +In `average` or `sum` data sources, all values are normalized and are reported to prometheus as gauges. Now, use the 'expression' text form in prometheus. Begin to type the metrics we are looking for: `netdata_system_cpu`. You should see that the text form begins to auto-fill as prometheus knows about this metric. 
+ +If the data source was `as collected`, the response would be: + +```sh +# COMMENT homogeneus chart "system.cpu", context "system.cpu", family "cpu", units "percentage" +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438 +# COMMENT 
netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438 +# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter) +netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438 +``` +*(netdata response for `system.cpu` with source=`as-collected`)* + +For more information check prometheus documentation. + +### Streaming data from upstream hosts + +The `format=prometheus` parameter only exports the host's netdata metrics. If you are using the master/slave functionality of netdata this ignores any upstream hosts - so you should consider using the below in your **prometheus.yml**: + +``` + metrics_path: '/api/v1/allmetrics' + params: + format: [prometheus_all_hosts] + honor_labels: true +``` + +This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names provided. + +### timestamps + +To pass the metrics through prometheus pushgateway, netdata supports the option `&timestamps=no` to send the metrics without timestamps. + +## netdata host variables + +netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default. + +To expose them, append `variables=yes` to the netdata URL. 
+ +### TYPE and HELP + +To save bandwidth, and because prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If wanted they can be re-enabled via `types=yes` and `help=yes`, e.g. `/api/v1/allmetrics?format=prometheus&types=yes&help=yes` + +### Names and IDs + +netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are human friendly labels (also unique). + +Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc. + +The default is controlled in `netdata.conf`: + +``` +[backend] + send names instead of ids = yes | no +``` + +You can overwrite it from prometheus, by appending to the URL: + +* `&names=no` to get IDs (the old behaviour) +* `&names=yes` to get names + +### Filtering metrics sent to prometheus + +netdata can filter the metrics it sends to prometheus with this setting: + +``` +[backend] + send charts matching = * +``` + +This settings accepts a space separated list of patterns to match the **charts** to be sent to prometheus. Each pattern can use ` * ` as wildcard, any number of times (e.g `*a*b*c*` is valid). Patterns starting with ` ! ` give a negative match (e.g `!*.bad users.* groups.*` will send all the users and groups except `bad` user and `bad` group). The order is important: the first match (positive or negative) left to right, is used. + +### Changing the prefix of netdata metrics + +netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this: + +``` +[backend] + prefix = netdata +``` + +It can also be changed from the URL, by appending `&prefix=netdata`. 
+ +### accuracy of `average` and `sum` data sources + +When the data source is set to `average` or `sum`, netdata remembers the last access of each client accessing prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since that. This means that prometheus servers are not losing data when they access netdata with data source = `average` or `sum`. + +To uniquely identify each prometheus server, netdata uses the IP of the client accessing the metrics. If however the IP is not good enough for identifying a single prometheus server (e.g. when prometheus servers are accessing netdata through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus may append `&server=NAME` to the URL. This `NAME` is used by netdata to uniquely identify each prometheus server and keep track of its last access time. diff --git a/src/backends/prometheus/backend_prometheus.c b/backends/prometheus/backend_prometheus.c similarity index 100% rename from src/backends/prometheus/backend_prometheus.c rename to backends/prometheus/backend_prometheus.c diff --git a/src/backends/prometheus/backend_prometheus.h b/backends/prometheus/backend_prometheus.h similarity index 96% rename from src/backends/prometheus/backend_prometheus.h rename to backends/prometheus/backend_prometheus.h index c0f2b4e9c1..dc4ec753f2 100644 --- a/src/backends/prometheus/backend_prometheus.h +++ b/backends/prometheus/backend_prometheus.h @@ -3,7 +3,7 @@ #ifndef NETDATA_BACKEND_PROMETHEUS_H #define NETDATA_BACKEND_PROMETHEUS_H 1 -#include "../backends.h" +#include "backends/backends.h" typedef enum prometheus_output_flags { PROMETHEUS_OUTPUT_NONE = 0, diff --git a/charts.d/Makefile.am b/charts.d/Makefile.am deleted file mode 100644 index 573e7bcef4..0000000000 --- a/charts.d/Makefile.am +++ /dev/null @@ -1,32 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# SPDX-License-Identifier: GPL-3.0-or-later 
-# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_charts_SCRIPTS = \ - $(NULL) - -dist_charts_DATA = \ - README.md \ - ap.chart.sh \ - apcupsd.chart.sh \ - apache.chart.sh \ - cpu_apps.chart.sh \ - cpufreq.chart.sh \ - example.chart.sh \ - exim.chart.sh \ - hddtemp.chart.sh \ - libreswan.chart.sh \ - load_average.chart.sh \ - mem_apps.chart.sh \ - mysql.chart.sh \ - nginx.chart.sh \ - nut.chart.sh \ - opensips.chart.sh \ - phpfpm.chart.sh \ - postfix.chart.sh \ - sensors.chart.sh \ - squid.chart.sh \ - tomcat.chart.sh \ - $(NULL) diff --git a/charts.d/README.md b/charts.d/README.md deleted file mode 100644 index 748af08a1b..0000000000 --- a/charts.d/README.md +++ /dev/null @@ -1,344 +0,0 @@ -The following charts.d plugins are supported: - ---- - -# hddtemp - -The plugin will collect temperatures from disks - -It will create one chart with all active disks - -1. **temperature in Celsius** - -### configuration - -hddtemp needs to be running in daemonized mode - -```sh -# host with daemonized hddtemp -hddtemp_host="localhost" - -# port on which hddtemp is showing data -hddtemp_port="7634" - -# array of included disks -# the default is to include all -hddtemp_disks=() -``` - ---- - -# libreswan - -The plugin will collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels. - -The following charts are created, **per tunnel**: - -1. **Uptime** - - * the uptime of the tunnel - -2. **Traffic** - - * bytes in - * bytes out - -### configuration - -Its config file is `/etc/netdata/charts.d/libreswan.conf`. - -The plugin executes 2 commands to collect all the information it needs: - -```sh -ipsec whack --status -ipsec whack --trafficstatus -``` - -The first command is used to extract the currently established tunnels, their IDs and their names. -The second command is used to extract the current uptime and traffic. - -Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied. 
-The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics. - -To allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content: - -``` -netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status -netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus -``` - -Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path). - ---- - -# mysql - -The plugin will monitor one or more mysql servers - -It will produce the following charts: - -1. **Bandwidth** in kbps - * in - * out - -2. **Queries** in queries/sec - * queries - * questions - * slow queries - -3. **Operations** in operations/sec - * opened tables - * flush - * commit - * delete - * prepare - * read first - * read key - * read next - * read prev - * read random - * read random next - * rollback - * save point - * update - * write - -4. **Table Locks** in locks/sec - * immediate - * waited - -5. **Select Issues** in issues/sec - * full join - * full range join - * range - * range check - * scan - -6. **Sort Issues** in issues/sec - * merge passes - * range - * scan - -### configuration - -You can configure many database servers, like this: - -You can provide, per server, the following: - -1. a name, anything you like, but keep it short -2. the mysql command to connect to the server -3. the mysql command line options to be used for connecting to the server - -Here is an example for 2 servers: - -```sh -mysql_opts[server1]="-h server1.example.com" -mysql_opts[server2]="-h server2.example.com --connect_timeout 2" -``` - -The above will use the `mysql` command found in the system path. -You can also provide a custom mysql command per server, like this: - -```sh -mysql_cmds[server2]="/opt/mysql/bin/mysql" -``` - -The above sets the mysql command only for server2. server1 will use the system default. 
- -If no configuration is given, the plugin will attempt to connect to mysql server at localhost. - - ---- - -# nut - -The plugin will collect UPS data for all UPSes configured in the system. - -The following charts will be created: - -1. **UPS Charge** - - * percentage changed - -2. **UPS Battery Voltage** - - * current voltage - * high voltage - * low voltage - * nominal voltage - -3. **UPS Input Voltage** - - * current voltage - * fault voltage - * nominal voltage - -4. **UPS Input Current** - - * nominal current - -5. **UPS Input Frequency** - - * current frequency - * nominal frequency - -6. **UPS Output Voltage** - - * current voltage - -7. **UPS Load** - - * current load - -8. **UPS Temperature** - - * current temperature - - -### configuration - -This is the internal default for `/etc/netdata/nut.conf` - -```sh -# a space separated list of UPS names -# if empty, the list returned by 'upsc -l' will be used -nut_ups= - -# how frequently to collect UPS data -nut_update_every=2 -``` - ---- - -# postfix - -The plugin will collect the postfix queue size. - -It will create two charts: - -1. **queue size in emails** -2. **queue size in KB** - -### configuration - -This is the internal default for `/etc/netdata/postfix.conf` - -```sh -# the postqueue command -# if empty, it will use the one found in the system path -postfix_postqueue= - -# how frequently to collect queue size -postfix_update_every=15 -``` - ---- - -# sensors - -The plugin will provide charts for all configured system sensors - -> This plugin is reading sensors directly from the kernel. -> The `lm-sensors` package is able to perform calculations on the -> kernel provided values, this plugin will not perform. -> So, the values graphed, are the raw hardware values of the sensors. - -The plugin will create netdata charts for: - -1. **Temperature** -2. **Voltage** -3. **Current** -4. **Power** -5. **Fans Speed** -6. **Energy** -7. 
**Humidity** - -One chart for every sensor chip found and each of the above will be created. - -### configuration - -This is the internal default for `/etc/netdata/sensors.conf` - -```sh -# the directory the kernel keeps sensor data -sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" - -# how deep in the tree to check for sensor data -sensors_sys_depth=10 - -# if set to 1, the script will overwrite internal -# script functions with code generated ones -# leave to 1, is faster -sensors_source_update=1 - -# how frequently to collect sensor data -# the default is to collect it at every iteration of charts.d -sensors_update_every= - -# array of sensors which are excluded -# the default is to include all -sensors_excluded=() -``` - ---- - -# squid - -The plugin will monitor a squid server. - -It will produce 4 charts: - -1. **Squid Client Bandwidth** in kbps - - * in - * out - * hits - -2. **Squid Client Requests** in requests/sec - - * requests - * hits - * errors - -3. **Squid Server Bandwidth** in kbps - - * in - * out - -4. **Squid Server Requests** in requests/sec - - * requests - * errors - -### autoconfig - -The plugin will by itself detect squid servers running on -localhost, on ports 3128 or 8080. - -It will attempt to download URLs in the form: - -- `cache_object://HOST:PORT/counters` -- `/squid-internal-mgr/counters` - -If any succeeds, it will use this. 
- -### configuration - -If you need to configure it by hand, create the file -`/etc/netdata/squid.conf` with the following variables: - -- `squid_host=IP` the IP of the squid host -- `squid_port=PORT` the port the squid is listening -- `squid_url="URL"` the URL with the statistics to be fetched from squid -- `squid_timeout=SECONDS` how much time we should wait for squid to respond -- `squid_update_every=SECONDS` the frequency of the data collection - -Example `/etc/netdata/squid.conf`: - -```sh -squid_host=127.0.0.1 -squid_port=3128 -squid_url="cache_object://127.0.0.1:3128/counters" -squid_timeout=2 -squid_update_every=5 -``` diff --git a/collectors/Makefile.am b/collectors/Makefile.am new file mode 100644 index 0000000000..4ecd1f1761 --- /dev/null +++ b/collectors/Makefile.am @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + plugins.d \ + apps.plugin \ + cgroups.plugin \ + charts.d.plugin \ + checks.plugin \ + diskspace.plugin \ + fping.plugin \ + freebsd.plugin \ + freeipmi.plugin \ + idlejitter.plugin \ + macos.plugin \ + nfacct.plugin \ + node.d.plugin \ + proc.plugin \ + python.d.plugin \ + statsd.plugin \ + tc.plugin \ + $(NULL) + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/README.md b/collectors/README.md new file mode 100644 index 0000000000..3068bf4b55 --- /dev/null +++ b/collectors/README.md @@ -0,0 +1,118 @@ +# Data Collection Plugins + +netdata supports **internal** and **external** data collection plugins: + +- **internal** plugins are written in `C` and run as threads inside the netdata daemon. + +- **external** plugins may be written in any computer language and are spawn as independent long-running processes by the netdata daemon. + They communicate with the netdata daemon via `pipes` (`stdout` communication). + +To minimize the number of processes spawn for data collection, netdata also supports **plugin orchestrators**. 
+ +- **plugin orchestrators** are external plugins that do not collect any data by themselves. + Instead they support data collection **modules** written in the language of the orchestrator. + Usually the orchestrator provides a higher level abstraction, making it ideal for writing new + data collection modules with the minimum of code. + + Currently netdata provides these plugin orchestrators: + BASH v4+ [charts.d.plugin](charts.d.plugin), + node.js [node.d.plugin](node.d.plugin) and + python v2+ (including v3) [python.d.plugin](python.d.plugin). + +## Netdata Plugins + +plugin|lang|O/S|runs as|modular|description +:---:|:---:|:---:|:---:|:---:|:--- +[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. +[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems +[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+. +[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled) +[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points +[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points. +[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems +[freeipmi.plugin](freeipmi.plugin/)|`C`|linux|external|-|collects metrics from enterprise hardware sensors, on Linux servers. 
+[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems +[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on MacOS systems +[nfacct.plugin](nfacct.plugin/)|`C`|linux|internal|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct` +[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`. +[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins +[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems +[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). +[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for netdata +[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces + +## Enabling and Disabling plugins + +Each plugin can be enabled or disabled via `netdata.conf`, section `[plugins]`. + +In this section there is a list of all the plugins with a boolean setting to enable them or disable them. + +The exception is `statsd.plugin`, which has its own `[statsd]` section. + +Once a plugin is enabled, consult the page of each plugin for additional configuration options. + +All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options. + +### Internal Plugins + +Each of the internal plugins runs as a thread inside the netdata daemon. +Once this thread has started, the plugin may spawn additional threads according to its design. 
+ +#### Internal Plugins API + +The internal data collection API consists of the following calls: + +```c +collect_data() { + // collect data here (one iteration) + + collected_number collected_value = collect_a_value(); + + // give the metrics to netdata + + static RRDSET *st = NULL; // the chart + static RRDDIM *rd = NULL; // a dimension attached to this chart + + if(unlikely(!st)) { + // we haven't created this chart before + // create it now + st = rrdset_create_localhost( + "type" + , "id" + , "name" + , "family" + , "context" + , "Chart Title" + , "units" + , "plugin-name" + , "module-name" + , priority + , update_every + , chart_type + ); + + // attach a metric to it + rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm); + } + else { + // this chart is already created + // let netdata know we start a new iteration on it + rrdset_next(st); + } + + // give the collected value(s) to the chart + rrddim_set_by_pointer(st, rd, collected_value); + + // signal netdata we are done with this iteration + rrdset_done(st); +} +``` + +Of course netdata has a lot of libraries to help you also in collecting the metrics. +The best way to find your way through this, is to examine what other similar plugins do. + + +### External Plugins + +**External plugins** use the API and are managed by [plugins.d](plugins.d/). 
+ diff --git a/src/plugins/all.h b/collectors/all.h similarity index 97% rename from src/plugins/all.h rename to collectors/all.h index c08c7d9e75..aa19bd5bd3 100644 --- a/src/plugins/all.h +++ b/collectors/all.h @@ -3,22 +3,23 @@ #ifndef NETDATA_ALL_H #define NETDATA_ALL_H 1 -#include "../common.h" +#include "../daemon/common.h" // netdata internal data collection plugins #include "checks.plugin/plugin_checks.h" #include "freebsd.plugin/plugin_freebsd.h" #include "idlejitter.plugin/plugin_idlejitter.h" -#include "linux-cgroups.plugin/sys_fs_cgroup.h" -#include "linux-diskspace.plugin/plugin_diskspace.h" -#include "linux-nfacct.plugin/plugin_nfacct.h" -#include "linux-proc.plugin/plugin_proc.h" -#include "linux-tc.plugin/plugin_tc.h" +#include "cgroups.plugin/sys_fs_cgroup.h" +#include "diskspace.plugin/plugin_diskspace.h" +#include "nfacct.plugin/plugin_nfacct.h" +#include "proc.plugin/plugin_proc.h" +#include "tc.plugin/plugin_tc.h" #include "macos.plugin/plugin_macos.h" -#include "plugins.d.plugin/plugins_d.h" #include "statsd.plugin/statsd.h" +#include "plugins.d/plugins_d.h" + // ---------------------------------------------------------------------------- // netdata chart priorities diff --git a/collectors/apps.plugin/Makefile.am b/collectors/apps.plugin/Makefile.am new file mode 100644 index 0000000000..be0306492a --- /dev/null +++ b/collectors/apps.plugin/Makefile.am @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_libconfig_DATA = \ + apps_groups.conf \ + $(NULL) diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md new file mode 100644 index 0000000000..c427e04223 --- /dev/null +++ b/collectors/apps.plugin/README.md @@ -0,0 +1,103 @@ +# apps.plugin + +This plugin provides charts for 3 sections of the default dashboard: + +1. Per application charts +2. Per user charts +3. 
Per user group charts + +## Per application charts + +This plugin walks through the entire `/proc` filesystem and aggregates statistics for applications of interest, defined in `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf)) (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`). + +The plugin internally builds a process tree (much like `ps fax` does), and groups processes together (evaluating both child and parent processes) so that the result is always a chart with a predefined set of dimensions (of course, only application groups found running are reported). + +Using this information it provides the following charts (per application group defined in `/etc/netdata/apps_groups.conf` - to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`): + +1. Total CPU usage +2. Total User CPU usage +3. Total System CPU usage +4. Total Disk Physical Reads +5. Total Disk Physical Writes +6. Total Disk Logical Reads +7. Total Disk Logical Writes +8. Total Open Files (unique files - if a file is found open multiple times, it is counted just once) +9. Total Dedicated Memory (non shared) +10. Total Minor Page Faults +11. Total Number of Processes +12. Total Number of Threads +13. Total Number of Pipes +14. Total Swap Activity (Major Page Faults) +15. Total Open Sockets + +## Per User Charts + +All the above charts are also grouped by username, using the effective uid of each process. + +## Per Group Charts + +All the above charts are also grouped by group name, using the effective gid of each process. + +## CPU Usage + +`apps.plugin` is a complex piece of software and has a lot of work to do (actually this plugin requires more CPU resources than the netdata daemon). For each process running, `apps.plugin` reads several `/proc` files to get CPU usage, memory allocated, I/O usage, open file descriptors, etc. 
Doing this work per-second, especially on hosts with several thousands of processes, may increase the CPU resources consumed by the plugin. + +In such cases, you may need to lower its data collection frequency. To do this, edit `/etc/netdata/netdata.conf` and find this section: + +``` +[plugin:apps] + # update every = 1 + # command options = +``` + +Uncomment the line `update every` and set it to a higher number. If you just set it to ` 2 `, its CPU resources will be cut in half, and data collection will be once every 2 seconds. + + +## Configuration + +The configuration file is `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf)). +To edit it on your system run `/etc/netdata/edit-config apps_groups.conf`. + +The configuration file accepts multiple lines, each having this format: + +```txt +group: process1 process2 ... +``` + +Process names should be given as they appear when running `ps -e`. The program will actually match the process names in the `/proc/PID/status` file. So, to be sure the name is right for a process running with PID ` X `, do this: + +```sh +cat /proc/X/status +``` + +The first line of the output is `Name: xxxxx`. This is the process name `apps.plugin` sees. + +The order of the lines in the file is important only if you include the same process name in multiple groups. + +## Apps plugin is missing information + +`apps.plugin` requires additional privileges to collect all the information it needs. The problem is described in issue #157. + +When netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`. If that is not possible (i.e. `setcap` fails), `apps.plugin` is setuid to `root`. + +## linux capabilities in containers + +There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities are silently ignored (in `lxc` containers `setcap` fails). 
+ +In the cases where `setcap` succeeds but the capabilities do not work, you will have to setuid `apps.plugin` to root by running these commands: + +```sh +chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin +chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin +``` + +You will have to run these every time you update netdata. + + +### Is it safe to give `apps.plugin` these privileges? + +`apps.plugin` performs a hard-coded function of building the process tree in memory, iterating forever, collecting metrics for each running process and sending them to netdata. This is a one-way communication, from `apps.plugin` to netdata. + +So, since `apps.plugin` cannot be instructed by netdata for the actions it performs, we think it is pretty safe to allow it to have these increased privileges. + +Keep in mind that `apps.plugin` will still run without these permissions, but it will not be able to collect all the data for every process. diff --git a/conf.d/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf similarity index 100% rename from conf.d/apps_groups.conf rename to collectors/apps.plugin/apps_groups.conf diff --git a/src/plugins/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c similarity index 100% rename from src/plugins/apps.plugin/apps_plugin.c rename to collectors/apps.plugin/apps_plugin.c diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am new file mode 100644 index 0000000000..fd878049d0 --- /dev/null +++ b/collectors/cgroups.plugin/Makefile.am @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + cgroup-name.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_plugins_SCRIPTS = \ + cgroup-name.sh \ + cgroup-network-helper.sh \ + $(NULL) + +dist_noinst_DATA = \ + cgroup-name.sh.in \ + $(NULL) diff --git a/src/plugins/linux-cgroups.plugin/cgroup-name.sh.in 
b/collectors/cgroups.plugin/cgroup-name.sh.in similarity index 100% rename from src/plugins/linux-cgroups.plugin/cgroup-name.sh.in rename to collectors/cgroups.plugin/cgroup-name.sh.in diff --git a/src/plugins/linux-cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh similarity index 100% rename from src/plugins/linux-cgroups.plugin/cgroup-network-helper.sh rename to collectors/cgroups.plugin/cgroup-network-helper.sh diff --git a/src/plugins/linux-cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c similarity index 99% rename from src/plugins/linux-cgroups.plugin/cgroup-network.c rename to collectors/cgroups.plugin/cgroup-network.c index 1eb42cb12b..7fa7ee9608 100644 --- a/src/plugins/linux-cgroups.plugin/cgroup-network.c +++ b/collectors/cgroups.plugin/cgroup-network.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../common.h" +#include "../../daemon/common.h" #ifdef HAVE_SETNS #ifndef _GNU_SOURCE diff --git a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c similarity index 100% rename from src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c rename to collectors/cgroups.plugin/sys_fs_cgroup.c diff --git a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h similarity index 89% rename from src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h rename to collectors/cgroups.plugin/sys_fs_cgroup.h index d5d86d050c..09ce5e3fb3 100644 --- a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h +++ b/collectors/cgroups.plugin/sys_fs_cgroup.h @@ -3,7 +3,7 @@ #ifndef NETDATA_SYS_FS_CGROUP_H #define NETDATA_SYS_FS_CGROUP_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #if (TARGET_OS == OS_LINUX) @@ -20,7 +20,7 @@ extern void *cgroups_main(void *ptr); -#include "../linux-proc.plugin/plugin_proc.h" +#include "../proc.plugin/plugin_proc.h" #else // (TARGET_OS == OS_LINUX) diff --git 
a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am new file mode 100644 index 0000000000..1d580c947d --- /dev/null +++ b/collectors/charts.d.plugin/Makefile.am @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + charts.d.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + charts.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + charts.d.dryrun-helper.sh \ + charts.d.plugin \ + loopsleepms.sh.inc \ + $(NULL) + +dist_noinst_DATA = \ + charts.d.plugin.in \ + ap/README.md \ + apache/README.md \ + apcupsd/README.md \ + cpu_apps/README.md \ + cpufreq/README.md \ + example/README.md \ + exim/README.md \ + hddtemp/README.md \ + libreswan/README.md \ + load_average/README.md \ + mem_apps/README.md \ + mysql/README.md \ + nginx/README.md \ + nut/README.md \ + opensips/README.md \ + phpfpm/README.md \ + postfix/README.md \ + sensors/README.md \ + squid/README.md \ + tomcat/README.md \ + $(NULL) + +dist_charts_SCRIPTS = \ + $(NULL) + +dist_charts_DATA = \ + ap/ap.chart.sh \ + apcupsd/apcupsd.chart.sh \ + apache/apache.chart.sh \ + cpu_apps/cpu_apps.chart.sh \ + cpufreq/cpufreq.chart.sh \ + example/example.chart.sh \ + exim/exim.chart.sh \ + hddtemp/hddtemp.chart.sh \ + libreswan/libreswan.chart.sh \ + load_average/load_average.chart.sh \ + mem_apps/mem_apps.chart.sh \ + mysql/mysql.chart.sh \ + nginx/nginx.chart.sh \ + nut/nut.chart.sh \ + opensips/opensips.chart.sh \ + phpfpm/phpfpm.chart.sh \ + postfix/postfix.chart.sh \ + sensors/sensors.chart.sh \ + squid/squid.chart.sh \ + tomcat/tomcat.chart.sh \ + $(NULL) + +chartsconfigdir=$(libconfigdir)/charts.d +dist_chartsconfig_DATA = \ + ap/ap.conf \ + apache/apache.conf \ + apcupsd/apcupsd.conf \ + cpu_apps/cpu_apps.conf \ + cpufreq/cpufreq.conf \ + example/example.conf \ + exim/exim.conf \ + hddtemp/hddtemp.conf \ + libreswan/libreswan.conf \ + 
load_average/load_average.conf \ + mem_apps/mem_apps.conf \ + mysql/mysql.conf \ + nginx/nginx.conf \ + nut/nut.conf \ + opensips/opensips.conf \ + phpfpm/phpfpm.conf \ + postfix/postfix.conf \ + sensors/sensors.conf \ + squid/squid.conf \ + tomcat/tomcat.conf \ + $(NULL) diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md new file mode 100644 index 0000000000..b224bffe3a --- /dev/null +++ b/collectors/charts.d.plugin/README.md @@ -0,0 +1,193 @@ +# charts.d.plugin + +`charts.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+. + +1. It runs as an independent process `ps fax` shows it +2. It is started and stopped automatically by netdata +3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon) +4. Supports any number of data collection **modules** + +`charts.d.plugin` has been designed so that the actual script that will do data collection will be permanently in +memory, collecting data with as little overheads as possible +(i.e. initialize once, repeatedly collect values with minimal overhead). + +`charts.d.plugin` looks for scripts in `/usr/lib/netdata/charts.d`. +The scripts should have the filename suffix: `.chart.sh`. + +## Configuration + +`charts.d.plugin` itself can be configured using the configuration file `/etc/netdata/charts.d.conf` +(to edit it on your system run `/etc/netdata/edit-config charts.d.conf`). This file is also a BASH script. + +In this file, you can place statements like this: + +``` +enable_all_charts="yes" +X="yes" +Y="no" +``` + +where `X` and `Y` are the names of individual charts.d collector scripts. +When set to `yes`, charts.d will evaluate the collector script (see below). +When set to `no`, charts.d will ignore the collector script. + +The variable `enable_all_charts` sets the default enable/disable state for all charts. 
+ +## A charts.d module + +A `charts.d.plugin` module is a BASH script defining a few functions. + +For a module called `X`, the following criteria must be met: + +1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`. + +2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`. + The configuration file `X.conf` is also a BASH script itself. + To edit the default files supplied by netdata run `/etc/netdata/edit-config charts.d/X.conf`, + where `X` is the name of the module. + +3. All functions and global variables defined in the script and its configuration, must begin with `X_`. + +4. The following functions must be defined: + + - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not + (following the standard Linux command line return codes: 0 = OK, the collector can operate and 1 = FAILED, + the collector cannot be used). + + - `X_create()` - creates the netdata charts, following the standard netdata plugin guides as described in + **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`). + The return value does matter: 0 = OK, 1 = FAILED. + + - `X_update()` - collects the values for the defined charts, following the standard netdata plugin guides + as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`). + The return value also matters: 0 = OK, 1 = FAILED. + +5. The following global variables are available to be set: + - `X_update_every` - is the data collection frequency for the module script, in seconds. + +The module script may use more functions or variables. But all of them must begin with `X_`. + +The standard netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**). + +### X_check() + +The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config). 
+ +For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to +connect to a local mysql database to find out if it can read the values it needs. + +`X_check()` is run only once for the lifetime of the module. + +### X_create() + +The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard netdata +plugin guides (**[External Plugins](../plugins.d/)**). + +`X_create()` will be called just once and only after `X_check()` was successful. +You can however call it yourself when there is need for it (for example to add a new dimension to an existing chart). + +A non-zero return value will disable the collector. + +### X_update() + +`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to netdata, +following the netdata plugin guides (**[External Plugins](../plugins.d/)**). + +The function will be called with one parameter: microseconds since the last time it was run. This value should be +appended to the `BEGIN` statement of every chart updated by the collector script. + +A non-zero return value will disable the collector. + +### Useful functions charts.d provides + +Module scripts can use the following charts.d functions: + +#### require_cmd command + +`require_cmd()` will check if a command is available in the running system. + +For example, your `X_check()` function may use it like this: + +```sh +mysql_check() { + require_cmd mysql || return 1 + return 0 +} +``` + +Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled. + +#### fixid "string" + +`fixid()` will get a string and return a properly formatted id for a chart or dimension. + +This is an expensive function that should not be used in `X_update()`. 
+You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this: + +```sh +declare -A X_ids=() +X_create() { + local name="a very bad name for id" + + X_ids[$name]="$(fixid "$name")" +} + +X_update() { + local microseconds="$1" + + ... + local name="a very bad name for id" + ... + + echo "BEGIN ${X_ids[$name]} $microseconds" + ... +} +``` + +### Debugging your collectors + +You can run `charts.d.plugin` by hand with something like this: + +```sh +# become user netdata +sudo su -s /bin/sh netdata + +# run the plugin in debug mode +/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z +``` + +Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts +`X`, `Y` and `Z`. You can define zero or more module scripts. If none is defined, charts.d will evaluate all +module scripts available. + +Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running +`charts.d.plugin`: + +```sh +export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata" +``` + +Also, remember that netdata runs `charts.d.plugin` as user `netdata` (or any other user netdata is configured to run as). + + +## Running multiple instances of charts.d.plugin + +`charts.d.plugin` will call the `X_update()` functions one after another. This means that a delay in collector `X` +will also delay the collection of `Y` and `Z`. + +You can have multiple `charts.d.plugin` running to overcome this problem. + +This is what you need to do: + +1. Decide a new name for the new charts.d instance: example `charts2.d`. + +2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable / disable the + module you want each to run. Remember to set `enable_all_charts="no"` to both of them, and enable the individual + modules for each. + +3. link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`. 
+ Netdata will spawn a new charts.d process. + +Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are +created in `/usr/libexec/netdata/plugins.d/`. + diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md new file mode 100644 index 0000000000..1b82f49bac --- /dev/null +++ b/collectors/charts.d.plugin/ap/README.md @@ -0,0 +1,86 @@ +# Access Point Plugin (ap) + +The `ap` collector visualizes data related to access points. + +The source code is [here](https://github.com/netdata/netdata/blob/master/charts.d/ap.chart.sh). + +## Example netdata charts + + + +## How it works + +It does the following: + +1. Runs `iw dev` searching for interfaces that have `type AP`. + + From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`. + + Example: +```sh +# iw dev +phy#0 + Interface wlan0 + ifindex 3 + wdev 0x1 + addr 7c:dd:90:77:34:2a + ssid TSAOUSIS + type AP + channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz +``` + + +2. For each interface found, it runs `iw INTERFACE station dump`. + + From the output is collects: + + - rx/tx bytes + - rx/tx packets + - tx retries + - tx failed + - signal strength + - rx/tx bitrate + - expected throughput + + Example: + +```sh +# iw wlan0 station dump +Station 40:b8:37:5a:ed:5e (on wlan0) + inactive time: 910 ms + rx bytes: 15588897 + rx packets: 127772 + tx bytes: 52257763 + tx packets: 95802 + tx retries: 2162 + tx failed: 28 + signal: -43 dBm + signal avg: -43 dBm + tx bitrate: 65.0 MBit/s MCS 7 + rx bitrate: 1.0 MBit/s + expected throughput: 32.125Mbps + authorized: yes + authenticated: yes + preamble: long + WMM/WME: yes + MFP: no + TDLS peer: no +``` + +3. 
For each interface found, it creates 6 charts: + + - Number of Connected clients + - Bandwidth for all clients + - Packets for all clients + - Transmit Issues for all clients + - Average Signal among all clients + - Average Bitrate (including average expected throughput) among all clients + +## Configuration + +You can only set `ap_update_every=NUMBER` to `/etc/netdata/charts.d/ap.conf`, to give the data collection frequency. +To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`. + +## Auto-detection + +The plugin is able to auto-detect if you are running access points on your linux box. diff --git a/charts.d/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh similarity index 100% rename from charts.d/ap.chart.sh rename to collectors/charts.d.plugin/ap/ap.chart.sh diff --git a/conf.d/charts.d/ap.conf b/collectors/charts.d.plugin/ap/ap.conf similarity index 100% rename from conf.d/charts.d/ap.conf rename to collectors/charts.d.plugin/ap/ap.conf diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md new file mode 100644 index 0000000000..d82951aacc --- /dev/null +++ b/collectors/charts.d.plugin/apache/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. 
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/charts.d/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh similarity index 100% rename from charts.d/apache.chart.sh rename to collectors/charts.d.plugin/apache/apache.chart.sh diff --git a/conf.d/charts.d/apache.conf b/collectors/charts.d.plugin/apache/apache.conf similarity index 100% rename from conf.d/charts.d/apache.conf rename to collectors/charts.d.plugin/apache/apache.conf diff --git a/python.d/python_modules/__init__.py b/collectors/charts.d.plugin/apcupsd/README.md similarity index 100% rename from python.d/python_modules/__init__.py rename to collectors/charts.d.plugin/apcupsd/README.md diff --git a/charts.d/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh similarity index 100% rename from charts.d/apcupsd.chart.sh rename to collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh diff --git a/conf.d/charts.d/apcupsd.conf b/collectors/charts.d.plugin/apcupsd/apcupsd.conf similarity index 100% rename from conf.d/charts.d/apcupsd.conf rename to collectors/charts.d.plugin/apcupsd/apcupsd.conf diff --git a/conf.d/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf similarity index 100% rename from conf.d/charts.d.conf rename to collectors/charts.d.plugin/charts.d.conf diff --git a/plugins.d/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh similarity index 100% rename from plugins.d/charts.d.dryrun-helper.sh rename to collectors/charts.d.plugin/charts.d.dryrun-helper.sh diff --git a/plugins.d/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in similarity index 100% rename from plugins.d/charts.d.plugin.in rename to collectors/charts.d.plugin/charts.d.plugin.in diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md new file mode 100644 index 0000000000..cd8adf0a20 --- /dev/null +++ b/collectors/charts.d.plugin/cpu_apps/README.md @@ -0,0 
+1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE APPS.PLUGIN. diff --git a/charts.d/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh similarity index 100% rename from charts.d/cpu_apps.chart.sh rename to collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh diff --git a/conf.d/charts.d/cpu_apps.conf b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf similarity index 100% rename from conf.d/charts.d/cpu_apps.conf rename to collectors/charts.d.plugin/cpu_apps/cpu_apps.conf diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md new file mode 100644 index 0000000000..d82951aacc --- /dev/null +++ b/collectors/charts.d.plugin/cpufreq/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/charts.d/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh similarity index 100% rename from charts.d/cpufreq.chart.sh rename to collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh diff --git a/conf.d/charts.d/cpufreq.conf b/collectors/charts.d.plugin/cpufreq/cpufreq.conf similarity index 100% rename from conf.d/charts.d/cpufreq.conf rename to collectors/charts.d.plugin/cpufreq/cpufreq.conf diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md new file mode 100644 index 0000000000..bfd5e210ae --- /dev/null +++ b/collectors/charts.d.plugin/example/README.md @@ -0,0 +1,2 @@ +This is just an example charts.d data collector. 
+ diff --git a/charts.d/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh similarity index 100% rename from charts.d/example.chart.sh rename to collectors/charts.d.plugin/example/example.chart.sh diff --git a/conf.d/charts.d/example.conf b/collectors/charts.d.plugin/example/example.conf similarity index 100% rename from conf.d/charts.d/example.conf rename to collectors/charts.d.plugin/example/example.conf diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md new file mode 100644 index 0000000000..d82951aacc --- /dev/null +++ b/collectors/charts.d.plugin/exim/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/charts.d/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh similarity index 100% rename from charts.d/exim.chart.sh rename to collectors/charts.d.plugin/exim/exim.chart.sh diff --git a/conf.d/charts.d/exim.conf b/collectors/charts.d.plugin/exim/exim.conf similarity index 100% rename from conf.d/charts.d/exim.conf rename to collectors/charts.d.plugin/exim/exim.conf diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md new file mode 100644 index 0000000000..98f18900ce --- /dev/null +++ b/collectors/charts.d.plugin/hddtemp/README.md @@ -0,0 +1,28 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +# hddtemp + +The plugin will collect temperatures from disks + +It will create one chart with all active disks + +1. 
**temperature in Celsius** + +### configuration + +hddtemp needs to be running in daemonized mode + +```sh +# host with daemonized hddtemp +hddtemp_host="localhost" + +# port on which hddtemp is showing data +hddtemp_port="7634" + +# array of included disks +# the default is to include all +hddtemp_disks=() +``` + +--- diff --git a/charts.d/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh similarity index 100% rename from charts.d/hddtemp.chart.sh rename to collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh diff --git a/conf.d/charts.d/hddtemp.conf b/collectors/charts.d.plugin/hddtemp/hddtemp.conf similarity index 100% rename from conf.d/charts.d/hddtemp.conf rename to collectors/charts.d.plugin/hddtemp/hddtemp.conf diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md new file mode 100644 index 0000000000..41026cf725 --- /dev/null +++ b/collectors/charts.d.plugin/libreswan/README.md @@ -0,0 +1,42 @@ +# libreswan + +The plugin will collect bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels. + +The following charts are created, **per tunnel**: + +1. **Uptime** + + * the uptime of the tunnel + +2. **Traffic** + + * bytes in + * bytes out + +### configuration + +Its config file is `/etc/netdata/charts.d/libreswan.conf`. + +The plugin executes 2 commands to collect all the information it needs: + +```sh +ipsec whack --status +ipsec whack --trafficstatus +``` + +The first command is used to extract the currently established tunnels, their IDs and their names. +The second command is used to extract the current uptime and traffic. + +Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied. +The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.
+ +To allow user `netdata` to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content: + +``` +netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status +netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus +``` + +Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path). + +--- diff --git a/charts.d/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh similarity index 100% rename from charts.d/libreswan.chart.sh rename to collectors/charts.d.plugin/libreswan/libreswan.chart.sh diff --git a/conf.d/charts.d/libreswan.conf b/collectors/charts.d.plugin/libreswan/libreswan.conf similarity index 100% rename from conf.d/charts.d/libreswan.conf rename to collectors/charts.d.plugin/libreswan/libreswan.conf diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md new file mode 100644 index 0000000000..39d3b81894 --- /dev/null +++ b/collectors/charts.d.plugin/load_average/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE.
+> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF diff --git a/charts.d/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh similarity index 100% rename from charts.d/load_average.chart.sh rename to collectors/charts.d.plugin/load_average/load_average.chart.sh diff --git a/conf.d/charts.d/load_average.conf b/collectors/charts.d.plugin/load_average/load_average.conf similarity index 100% rename from conf.d/charts.d/load_average.conf rename to collectors/charts.d.plugin/load_average/load_average.conf diff --git a/plugins.d/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc similarity index 100% rename from plugins.d/loopsleepms.sh.inc rename to collectors/charts.d.plugin/loopsleepms.sh.inc diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md new file mode 100644 index 0000000000..cd8adf0a20 --- /dev/null +++ b/collectors/charts.d.plugin/mem_apps/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE APPS.PLUGIN. diff --git a/charts.d/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh similarity index 100% rename from charts.d/mem_apps.chart.sh rename to collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh diff --git a/conf.d/charts.d/mem_apps.conf b/collectors/charts.d.plugin/mem_apps/mem_apps.conf similarity index 100% rename from conf.d/charts.d/mem_apps.conf rename to collectors/charts.d.plugin/mem_apps/mem_apps.conf diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md new file mode 100644 index 0000000000..6765b53abe --- /dev/null +++ b/collectors/charts.d.plugin/mysql/README.md @@ -0,0 +1,81 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +# mysql + +The plugin will monitor one or more mysql servers + +It will produce the following charts: + +1. **Bandwidth** in kbps + * in + * out + +2. 
**Queries** in queries/sec + * queries + * questions + * slow queries + +3. **Operations** in operations/sec + * opened tables + * flush + * commit + * delete + * prepare + * read first + * read key + * read next + * read prev + * read random + * read random next + * rollback + * save point + * update + * write + +4. **Table Locks** in locks/sec + * immediate + * waited + +5. **Select Issues** in issues/sec + * full join + * full range join + * range + * range check + * scan + +6. **Sort Issues** in issues/sec + * merge passes + * range + * scan + +### configuration + +You can configure many database servers, like this: + +You can provide, per server, the following: + +1. a name, anything you like, but keep it short +2. the mysql command to connect to the server +3. the mysql command line options to be used for connecting to the server + +Here is an example for 2 servers: + +```sh +mysql_opts[server1]="-h server1.example.com" +mysql_opts[server2]="-h server2.example.com --connect_timeout 2" +``` + +The above will use the `mysql` command found in the system path. +You can also provide a custom mysql command per server, like this: + +```sh +mysql_cmds[server2]="/opt/mysql/bin/mysql" +``` + +The above sets the mysql command only for server2. server1 will use the system default. + +If no configuration is given, the plugin will attempt to connect to mysql server at localhost. 
+ + +--- diff --git a/charts.d/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh similarity index 100% rename from charts.d/mysql.chart.sh rename to collectors/charts.d.plugin/mysql/mysql.chart.sh diff --git a/conf.d/charts.d/mysql.conf b/collectors/charts.d.plugin/mysql/mysql.conf similarity index 100% rename from conf.d/charts.d/mysql.conf rename to collectors/charts.d.plugin/mysql/mysql.conf diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md new file mode 100644 index 0000000000..d82951aacc --- /dev/null +++ b/collectors/charts.d.plugin/nginx/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/charts.d/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh similarity index 100% rename from charts.d/nginx.chart.sh rename to collectors/charts.d.plugin/nginx/nginx.chart.sh diff --git a/conf.d/charts.d/nginx.conf b/collectors/charts.d.plugin/nginx/nginx.conf similarity index 100% rename from conf.d/charts.d/nginx.conf rename to collectors/charts.d.plugin/nginx/nginx.conf diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md new file mode 100644 index 0000000000..71906f55a5 --- /dev/null +++ b/collectors/charts.d.plugin/nut/README.md @@ -0,0 +1,59 @@ +# nut + +The plugin will collect UPS data for all UPSes configured in the system. + +The following charts will be created: + +1. **UPS Charge** + + * percentage changed + +2. **UPS Battery Voltage** + + * current voltage + * high voltage + * low voltage + * nominal voltage + +3. **UPS Input Voltage** + + * current voltage + * fault voltage + * nominal voltage + +4. **UPS Input Current** + + * nominal current + +5. **UPS Input Frequency** + + * current frequency + * nominal frequency + +6. **UPS Output Voltage** + + * current voltage + +7. **UPS Load** + + * current load + +8. 
**UPS Temperature** + + * current temperature + + +### configuration + +This is the internal default for `/etc/netdata/nut.conf` + +```sh +# a space separated list of UPS names +# if empty, the list returned by 'upsc -l' will be used +nut_ups= + +# how frequently to collect UPS data +nut_update_every=2 +``` + +--- diff --git a/charts.d/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh similarity index 100% rename from charts.d/nut.chart.sh rename to collectors/charts.d.plugin/nut/nut.chart.sh diff --git a/conf.d/charts.d/nut.conf b/collectors/charts.d.plugin/nut/nut.conf similarity index 100% rename from conf.d/charts.d/nut.conf rename to collectors/charts.d.plugin/nut/nut.conf diff --git a/python.d/python_modules/bases/FrameworkServices/__init__.py b/collectors/charts.d.plugin/opensips/README.md similarity index 100% rename from python.d/python_modules/bases/FrameworkServices/__init__.py rename to collectors/charts.d.plugin/opensips/README.md diff --git a/charts.d/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh similarity index 100% rename from charts.d/opensips.chart.sh rename to collectors/charts.d.plugin/opensips/opensips.chart.sh diff --git a/conf.d/charts.d/opensips.conf b/collectors/charts.d.plugin/opensips/opensips.conf similarity index 100% rename from conf.d/charts.d/opensips.conf rename to collectors/charts.d.plugin/opensips/opensips.conf diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md new file mode 100644 index 0000000000..d82951aacc --- /dev/null +++ b/collectors/charts.d.plugin/phpfpm/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. 
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/charts.d/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh similarity index 100% rename from charts.d/phpfpm.chart.sh rename to collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh diff --git a/conf.d/charts.d/phpfpm.conf b/collectors/charts.d.plugin/phpfpm/phpfpm.conf similarity index 100% rename from conf.d/charts.d/phpfpm.conf rename to collectors/charts.d.plugin/phpfpm/phpfpm.conf diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md new file mode 100644 index 0000000000..5fc265d561 --- /dev/null +++ b/collectors/charts.d.plugin/postfix/README.md @@ -0,0 +1,26 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +# postfix + +The plugin will collect the postfix queue size. + +It will create two charts: + +1. **queue size in emails** +2. **queue size in KB** + +### configuration + +This is the internal default for `/etc/netdata/postfix.conf` + +```sh +# the postqueue command +# if empty, it will use the one found in the system path +postfix_postqueue= + +# how frequently to collect queue size +postfix_update_every=15 +``` + +--- diff --git a/charts.d/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh similarity index 100% rename from charts.d/postfix.chart.sh rename to collectors/charts.d.plugin/postfix/postfix.chart.sh diff --git a/conf.d/charts.d/postfix.conf b/collectors/charts.d.plugin/postfix/postfix.conf similarity index 100% rename from conf.d/charts.d/postfix.conf rename to collectors/charts.d.plugin/postfix/postfix.conf diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md new file mode 100644 index 0000000000..ddc3650d65 --- /dev/null +++ b/collectors/charts.d.plugin/sensors/README.md @@ -0,0 +1,52 @@ +> THIS MODULE IS OBSOLETE. 
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + +> Unlike the python one, this module can collect temperature on RPi. + +# sensors + +The plugin will provide charts for all configured system sensors + +> This plugin is reading sensors directly from the kernel. +> The `lm-sensors` package is able to perform calculations on the +> kernel provided values, this plugin will not perform. +> So, the values graphed, are the raw hardware values of the sensors. + +The plugin will create netdata charts for: + +1. **Temperature** +2. **Voltage** +3. **Current** +4. **Power** +5. **Fans Speed** +6. **Energy** +7. **Humidity** + +One chart for every sensor chip found and each of the above will be created. + +### configuration + +This is the internal default for `/etc/netdata/sensors.conf` + +```sh +# the directory the kernel keeps sensor data +sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices" + +# how deep in the tree to check for sensor data +sensors_sys_depth=10 + +# if set to 1, the script will overwrite internal +# script functions with code generated ones +# leave to 1, is faster +sensors_source_update=1 + +# how frequently to collect sensor data +# the default is to collect it at every iteration of charts.d +sensors_update_every= + +# array of sensors which are excluded +# the default is to include all +sensors_excluded=() +``` + +--- diff --git a/charts.d/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh similarity index 100% rename from charts.d/sensors.chart.sh rename to collectors/charts.d.plugin/sensors/sensors.chart.sh diff --git a/conf.d/charts.d/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf similarity index 100% rename from conf.d/charts.d/sensors.conf rename to collectors/charts.d.plugin/sensors/sensors.conf diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md new file mode 100644 index 0000000000..0934ccfcf2 --- /dev/null +++ 
b/collectors/charts.d.plugin/squid/README.md @@ -0,0 +1,66 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT + + +# squid + +The plugin will monitor a squid server. + +It will produce 4 charts: + +1. **Squid Client Bandwidth** in kbps + + * in + * out + * hits + +2. **Squid Client Requests** in requests/sec + + * requests + * hits + * errors + +3. **Squid Server Bandwidth** in kbps + + * in + * out + +4. **Squid Server Requests** in requests/sec + + * requests + * errors + +### autoconfig + +The plugin will by itself detect squid servers running on +localhost, on ports 3128 or 8080. + +It will attempt to download URLs in the form: + +- `cache_object://HOST:PORT/counters` +- `/squid-internal-mgr/counters` + +If any succeeds, it will use this. + +### configuration + +If you need to configure it by hand, create the file +`/etc/netdata/squid.conf` with the following variables: + +- `squid_host=IP` the IP of the squid host +- `squid_port=PORT` the port the squid is listening +- `squid_url="URL"` the URL with the statistics to be fetched from squid +- `squid_timeout=SECONDS` how much time we should wait for squid to respond +- `squid_update_every=SECONDS` the frequency of the data collection + +Example `/etc/netdata/squid.conf`: + +```sh +squid_host=127.0.0.1 +squid_port=3128 +squid_url="cache_object://127.0.0.1:3128/counters" +squid_timeout=2 +squid_update_every=5 +``` + +--- diff --git a/charts.d/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh similarity index 100% rename from charts.d/squid.chart.sh rename to collectors/charts.d.plugin/squid/squid.chart.sh diff --git a/conf.d/charts.d/squid.conf b/collectors/charts.d.plugin/squid/squid.conf similarity index 100% rename from conf.d/charts.d/squid.conf rename to collectors/charts.d.plugin/squid/squid.conf diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md new file mode 100644 index 
0000000000..d82951aacc --- /dev/null +++ b/collectors/charts.d.plugin/tomcat/README.md @@ -0,0 +1,2 @@ +> THIS MODULE IS OBSOLETE. +> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT diff --git a/charts.d/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh similarity index 100% rename from charts.d/tomcat.chart.sh rename to collectors/charts.d.plugin/tomcat/tomcat.chart.sh diff --git a/conf.d/charts.d/tomcat.conf b/collectors/charts.d.plugin/tomcat/tomcat.conf similarity index 100% rename from conf.d/charts.d/tomcat.conf rename to collectors/charts.d.plugin/tomcat/tomcat.conf diff --git a/src/backends/json/Makefile.am b/collectors/checks.plugin/Makefile.am similarity index 63% rename from src/backends/json/Makefile.am rename to collectors/checks.plugin/Makefile.am index 8773fd098c..babdcf0df3 100644 --- a/src/backends/json/Makefile.am +++ b/collectors/checks.plugin/Makefile.am @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-3.0-or-later AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/src/plugins/checks.plugin/plugin_checks.c b/collectors/checks.plugin/plugin_checks.c similarity index 100% rename from src/plugins/checks.plugin/plugin_checks.c rename to collectors/checks.plugin/plugin_checks.c diff --git a/src/plugins/checks.plugin/plugin_checks.h b/collectors/checks.plugin/plugin_checks.h similarity index 94% rename from src/plugins/checks.plugin/plugin_checks.h rename to collectors/checks.plugin/plugin_checks.h index 9c3fa60f47..93494765d7 100644 --- a/src/plugins/checks.plugin/plugin_checks.h +++ b/collectors/checks.plugin/plugin_checks.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_CHECKS_H #define NETDATA_PLUGIN_CHECKS_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #ifdef NETDATA_INTERNAL_CHECKS diff --git a/collectors/diskspace.plugin/Makefile.am b/collectors/diskspace.plugin/Makefile.am new file mode 100644 index 
0000000000..19554bed8e --- /dev/null +++ b/collectors/diskspace.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md new file mode 100644 index 0000000000..920b3123be --- /dev/null +++ b/collectors/diskspace.plugin/README.md @@ -0,0 +1,5 @@ +> for disks performance monitoring, see the `proc` plugin, [here](../linux-proc.plugin/#monitoring-disks-performance-with-netdata) + +# diskspace.plugin + +This plugin monitors the disk space usage of mounted disks, under Linux. diff --git a/src/plugins/linux-diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c similarity index 100% rename from src/plugins/linux-diskspace.plugin/plugin_diskspace.c rename to collectors/diskspace.plugin/plugin_diskspace.c diff --git a/src/plugins/linux-diskspace.plugin/plugin_diskspace.h b/collectors/diskspace.plugin/plugin_diskspace.h similarity index 90% rename from src/plugins/linux-diskspace.plugin/plugin_diskspace.h rename to collectors/diskspace.plugin/plugin_diskspace.h index 4dd01f6d28..7c9df9d139 100644 --- a/src/plugins/linux-diskspace.plugin/plugin_diskspace.h +++ b/collectors/diskspace.plugin/plugin_diskspace.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H #define NETDATA_PLUGIN_PROC_DISKSPACE_H -#include "../../common.h" +#include "../../daemon/common.h" #if (TARGET_OS == OS_LINUX) @@ -21,7 +21,7 @@ extern void *diskspace_main(void *ptr); -#include "../linux-proc.plugin/plugin_proc.h" +#include "../proc.plugin/plugin_proc.h" #else // (TARGET_OS == OS_LINUX) diff --git a/collectors/fping.plugin/Makefile.am b/collectors/fping.plugin/Makefile.am new file mode 100644 index 0000000000..4395394db9 --- /dev/null +++ b/collectors/fping.plugin/Makefile.am @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: 
GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + fping.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_plugins_SCRIPTS = \ + fping.plugin \ + $(NULL) + +dist_noinst_DATA = \ + fping.plugin.in \ + README.md \ + $(NULL) + +dist_libconfig_DATA = \ + fping.conf \ + $(NULL) diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md new file mode 100644 index 0000000000..597d381717 --- /dev/null +++ b/collectors/fping.plugin/README.md @@ -0,0 +1,103 @@ +# fping.plugin + +The fping plugin supports monitoring latency, packet loss and uptime of any number of hosts, by pinging them with fping. + +A recent version of `fping` is required (one that supports option ` -N `). The supplied plugin can install it. Run: + +```sh +/usr/libexec/netdata/plugins.d/fping.plugin install +``` + +The above will download, build and install the right version as `/usr/local/bin/fping`. + +Then you need to edit `/etc/netdata/fping.conf` (to edit it on your system run `/etc/netdata/edit-config fping.conf`) like this: + +```sh +# uncomment the following line - it should already be there +fping="/usr/local/bin/fping" + +# set here all the hosts you need to ping +# I suggest to use hostnames and put their IPs in /etc/hosts +hosts="host1 host2 host3" + +# override the chart update frequency - the default is inherited from netdata +update_every=1 + +# time in milliseconds (1 sec = 1000 ms) to ping the hosts +# 200 = 5 pings per second +ping_every=200 + +# other fping options - these are the defaults +fping_opts="-R -b 56 -i 1 -r 0 -t 5000" +``` + +The latest version of the config: https://github.com/netdata/netdata/blob/master/conf.d/fping.conf + +## alarms + +netdata will automatically attach a few alarms for each host. 
+Check the latest versions of the fping alarms here: https://github.com/netdata/netdata/blob/master/conf.d/health.d/fping.conf + +## Additional Tips + +### Customizing Amount of Pings Per Second + +For example, to update the chart every 10 seconds and use 2 pings every 10 seconds, use this: + +```sh +# Chart Update Frequency (Time in Seconds) +update_every=10 + +# Time in Milliseconds (1 sec = 1000 ms) to Ping the Hosts +# The Following Example Sends 1 Ping Every 5000 ms +# Calculation Formula: ping_every = (update_every * 1000 ) / 2 +ping_every=5000 +``` + +### Multiple fping Plugins With Different Settings + +You may need to run multiple fping plugins with different settings for different hosts. For example, you may need to ping a few hosts 10 times per second, and others once per second. + +netdata allows you to add as many `fping` plugins as you like. + +Follow this procedure: + +**1. Create New fping Configuration File** + +Step Into Configuration Directory + +```sh +cd /etc/netdata +``` + +Copy Original fping Configuration File To New Configuration File + +```sh +cp fping.conf fping2.conf +``` + +Edit `fping2.conf` and set the settings and the hosts you need + +**2. Soft Link Original fping Plugin to New Plugin File** + +Become root (If The Step Is Performed As Non-Root User) + +```sh +sudo su +``` + +Step Into The Plugins Directory + +```sh +cd /usr/libexec/netdata/plugins.d +``` + +Link fping.plugin to fping2.plugin + +```sh +ln -s fping.plugin fping2.plugin +``` + +That's it. netdata will detect the new plugin and start it. + +You can name the new plugin any name you like. Just make sure the plugin and the configuration file have the same name.
diff --git a/conf.d/fping.conf b/collectors/fping.plugin/fping.conf similarity index 100% rename from conf.d/fping.conf rename to collectors/fping.plugin/fping.conf diff --git a/plugins.d/fping.plugin.in b/collectors/fping.plugin/fping.plugin.in similarity index 100% rename from plugins.d/fping.plugin.in rename to collectors/fping.plugin/fping.plugin.in diff --git a/src/plugins/apps.plugin/Makefile.am b/collectors/freebsd.plugin/Makefile.am similarity index 64% rename from src/plugins/apps.plugin/Makefile.am rename to collectors/freebsd.plugin/Makefile.am index 20504a2c6e..e80ec702d7 100644 --- a/src/plugins/apps.plugin/Makefile.am +++ b/collectors/freebsd.plugin/Makefile.am @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-3.0-or-later AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/src/plugins/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c similarity index 100% rename from src/plugins/freebsd.plugin/freebsd_devstat.c rename to collectors/freebsd.plugin/freebsd_devstat.c diff --git a/src/plugins/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c similarity index 100% rename from src/plugins/freebsd.plugin/freebsd_getifaddrs.c rename to collectors/freebsd.plugin/freebsd_getifaddrs.c diff --git a/src/plugins/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c similarity index 100% rename from src/plugins/freebsd.plugin/freebsd_getmntinfo.c rename to collectors/freebsd.plugin/freebsd_getmntinfo.c diff --git a/src/plugins/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c similarity index 100% rename from src/plugins/freebsd.plugin/freebsd_ipfw.c rename to collectors/freebsd.plugin/freebsd_ipfw.c diff --git a/src/plugins/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c similarity index 100% rename from 
src/plugins/freebsd.plugin/freebsd_kstat_zfs.c rename to collectors/freebsd.plugin/freebsd_kstat_zfs.c diff --git a/src/plugins/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c similarity index 100% rename from src/plugins/freebsd.plugin/freebsd_sysctl.c rename to collectors/freebsd.plugin/freebsd_sysctl.c diff --git a/src/plugins/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c similarity index 100% rename from src/plugins/freebsd.plugin/plugin_freebsd.c rename to collectors/freebsd.plugin/plugin_freebsd.c diff --git a/src/plugins/freebsd.plugin/plugin_freebsd.h b/collectors/freebsd.plugin/plugin_freebsd.h similarity index 98% rename from src/plugins/freebsd.plugin/plugin_freebsd.h rename to collectors/freebsd.plugin/plugin_freebsd.h index 0c44066b8c..5c66c534ca 100644 --- a/src/plugins/freebsd.plugin/plugin_freebsd.h +++ b/collectors/freebsd.plugin/plugin_freebsd.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_FREEBSD_H #define NETDATA_PLUGIN_FREEBSD_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #if (TARGET_OS == OS_FREEBSD) diff --git a/collectors/freeipmi.plugin/Makefile.am b/collectors/freeipmi.plugin/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/collectors/freeipmi.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md new file mode 100644 index 0000000000..f7c5cc1483 --- /dev/null +++ b/collectors/freeipmi.plugin/README.md @@ -0,0 +1,180 @@ +netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin. + +> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. 
The IPMI specification defines a set of interfaces for platform management and is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL). + +## compile `freeipmi.plugin` + +1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL based OS) using the package manager of your system. + +2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`. + +Keep in mind IPMI requires root access, so the plugin is setuid to root. + +If you just installed the required IPMI tools, please run at least once the command `ipmimonitoring` and verify it returns sensors information. This command initialises IPMI configuration, so that the netdata plugin will be able to work. + +## netdata use + +The plugin creates (up to) 8 charts, based on the information collected from IPMI: + +1. number of sensors by state +2. number of events in SEL +3. Temperatures CELSIUS +4. Temperatures FAHRENHEIT +5. Voltages +6. Currents +7. Power +8. Fans + + +It also adds 2 alarms: + +1. Sensors in non-nominal state (i.e. warning and critical) +2. SEL is non empty + + + +The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard. + +## `freeipmi.plugin` configuration + +The plugin supports a few options. To see them, run: + +```sh +# /usr/libexec/netdata/plugins.d/freeipmi.plugin -h + + netdata freeipmi.plugin 1.8.0-546-g72ce5d6b_rolling + Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr> + Released under GNU General Public License v3 or later. + All rights reserved. + + This program is a data collector plugin for netdata.
+ + Available command line options: + + SECONDS data collection frequency + minimum: 5 + + debug enable verbose output + default: disabled + + sel + no-sel enable/disable SEL collection + default: enabled + + hostname HOST + username USER + password PASS connect to remote IPMI host + default: local IPMI processor + + sdr-cache-dir PATH directory for SDR cache files + default: /tmp + + sensor-config-file FILE filename to read sensor configuration + default: system default + + ignore N1,N2,N3,... sensor IDs to ignore + default: none + + -v + -V + version print version and exit + + Linux kernel module for IPMI is CPU hungry. + On Linux run this to lower kipmiN CPU utilization: + # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us + + or create: /etc/modprobe.d/ipmi.conf with these contents: + options ipmi_si kipmid_max_busy_us=10 + + For more information: + https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin + +``` + +You can set these options in `/etc/netdata/netdata.conf` at this section: + +``` +[plugin:freeipmi] + update every = 5 + command options = +``` + +Append to `command options = ` the settings you need. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable. + +## ignoring specific sensors + +Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`). + +So, `freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To configure it, edit `/etc/netdata/netdata.conf` and set: + +``` +[plugin:freeipmi] + command options = ignore 1,2,3,4,... +``` + +To find the IDs to ignore, run the command `ipmimonitoring`. 
The first column is the wanted ID: + +``` +ID | Name | Type | State | Reading | Units | Event +1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK' +2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK' +3 | Avg Power | Current | Nominal | 100.00 | W | 'OK' +4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK' +5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK' +6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK' +7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK' +8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK' +9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK' +10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK' +11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK' +12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK' +13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK' +14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' +15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' +... +``` + + +## debugging + +You can run the plugin by hand: + +```sh +# become user netdata +sudo su -s /bin/sh netdata + +# run the plugin in debug mode +/usr/libexec/netdata/plugins.d/freeipmi.plugin 5 debug +``` + +You will get verbose output on what the plugin does. + +## kipmi0 CPU usage + +There have been reports that kipmi is showing increased CPU when the IPMI is queried. + +[IBM has given a few explanations](http://www-01.ibm.com/support/docview.wss?uid=nas7d580df3d15874988862575fa0050f604). + +Check also [this stackexchange post](http://unix.stackexchange.com/questions/74900/kipmi0-eating-up-to-99-8-cpu-on-centos-6-4). 
+ +To lower the CPU consumption of the system you can issue this command: + +```sh +echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us +``` + +You can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content: + +```sh +# prevent kipmi from consuming 100% CPU +options ipmi_si kipmid_max_busy_us=10 +``` + +This instructs the kernel IPMI module to pause for a tick between checking IPMI. Querying IPMI will be a lot slower now (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick). + +If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and set: + +``` +[plugins] + freeipmi = no +``` diff --git a/src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c similarity index 99% rename from src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c rename to collectors/freeipmi.plugin/freeipmi_plugin.c index 0a47fb44f0..a1cff3af06 100644 --- a/src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c +++ b/collectors/freeipmi.plugin/freeipmi_plugin.c @@ -1624,7 +1624,7 @@ int main (int argc, char **argv) { " options ipmi_si kipmid_max_busy_us=10\n" "\n" " For more information:\n" - " https://github.com/netdata/netdata/wiki/monitoring-IPMI\n" + " https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin\n" "\n" , VERSION , netdata_update_every diff --git a/collectors/idlejitter.plugin/Makefile.am b/collectors/idlejitter.plugin/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/collectors/idlejitter.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/idlejitter.plugin/README.md 
b/collectors/idlejitter.plugin/README.md new file mode 100644 index 0000000000..3c2080536d --- /dev/null +++ b/collectors/idlejitter.plugin/README.md @@ -0,0 +1,13 @@ +## idlejitter.plugin + +It works like this: + +A thread is spawned that requests to sleep for 20000 microseconds (20ms). +When the system wakes it up, it measures how many microseconds have passed. +The difference between the requested and the actual duration of the sleep, is the idle jitter. +This is done at most 50 times per second, to ensure we have a good average. + +This number is useful: + + 1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways). + 2. in cloud infrastructure, where the host can pause the VM or container for a small duration to perform operations at the host. diff --git a/src/plugins/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c similarity index 100% rename from src/plugins/idlejitter.plugin/plugin_idlejitter.c rename to collectors/idlejitter.plugin/plugin_idlejitter.c diff --git a/src/plugins/idlejitter.plugin/plugin_idlejitter.h b/collectors/idlejitter.plugin/plugin_idlejitter.h similarity index 94% rename from src/plugins/idlejitter.plugin/plugin_idlejitter.h rename to collectors/idlejitter.plugin/plugin_idlejitter.h index e3561e1c00..62fabea168 100644 --- a/src/plugins/idlejitter.plugin/plugin_idlejitter.h +++ b/collectors/idlejitter.plugin/plugin_idlejitter.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_IDLEJITTER_H #define NETDATA_PLUGIN_IDLEJITTER_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #define NETDATA_PLUGIN_HOOK_IDLEJITTER \ { \ diff --git a/collectors/macos.plugin/Makefile.am b/collectors/macos.plugin/Makefile.am new file mode 100644 index 0000000000..babdcf0df3 --- /dev/null +++ b/collectors/macos.plugin/Makefile.am @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff
--git a/src/plugins/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c similarity index 100% rename from src/plugins/macos.plugin/macos_fw.c rename to collectors/macos.plugin/macos_fw.c diff --git a/src/plugins/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c similarity index 100% rename from src/plugins/macos.plugin/macos_mach_smi.c rename to collectors/macos.plugin/macos_mach_smi.c diff --git a/src/plugins/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c similarity index 100% rename from src/plugins/macos.plugin/macos_sysctl.c rename to collectors/macos.plugin/macos_sysctl.c diff --git a/src/plugins/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c similarity index 100% rename from src/plugins/macos.plugin/plugin_macos.c rename to collectors/macos.plugin/plugin_macos.c diff --git a/src/plugins/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h similarity index 96% rename from src/plugins/macos.plugin/plugin_macos.h rename to collectors/macos.plugin/plugin_macos.h index 5fa2766d2e..0815c59c31 100644 --- a/src/plugins/macos.plugin/plugin_macos.h +++ b/collectors/macos.plugin/plugin_macos.h @@ -4,7 +4,7 @@ #ifndef NETDATA_PLUGIN_MACOS_H #define NETDATA_PLUGIN_MACOS_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #if (TARGET_OS == OS_MACOS) diff --git a/collectors/nfacct.plugin/Makefile.am b/collectors/nfacct.plugin/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/collectors/nfacct.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md new file mode 100644 index 0000000000..814b479151 --- /dev/null +++ b/collectors/nfacct.plugin/README.md @@ -0,0 +1,10 @@ +# nfacct.plugin + +This plugin that 
collects NFACCT statistics. + +It is currently disabled by default, because it requires root access. +We have to move the code to an external plugin to setuid just the plugin not the whole netdata server. + +You can build netdata with it to test it though. +Just run `./configure` (or `netdata-installer.sh`) with the option `--enable-plugin-nfacct` (and any other options you may need). +Remember, you have to tell netdata you want it to run as `root` for this plugin to work. diff --git a/src/plugins/linux-nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c similarity index 100% rename from src/plugins/linux-nfacct.plugin/plugin_nfacct.c rename to collectors/nfacct.plugin/plugin_nfacct.c diff --git a/src/plugins/linux-nfacct.plugin/plugin_nfacct.h b/collectors/nfacct.plugin/plugin_nfacct.h similarity index 95% rename from src/plugins/linux-nfacct.plugin/plugin_nfacct.h rename to collectors/nfacct.plugin/plugin_nfacct.h index 7ff33d374a..4311ccecf7 100644 --- a/src/plugins/linux-nfacct.plugin/plugin_nfacct.h +++ b/collectors/nfacct.plugin/plugin_nfacct.h @@ -3,7 +3,7 @@ #ifndef NETDATA_NFACCT_H #define NETDATA_NFACCT_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #if defined(INTERNAL_PLUGIN_NFACCT) diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am new file mode 100644 index 0000000000..67d0e1d855 --- /dev/null +++ b/collectors/node.d.plugin/Makefile.am @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in +CLEANFILES = \ + node.d.plugin \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + node.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + node.d.plugin \ + $(NULL) + +dist_noinst_DATA = \ + node.d.plugin.in \ + README.md \ + named/README.md \ + fronius/README.md \ + sma_webbox/README.md \ + snmp/README.md \ + stiebeleltron/README.md \ + $(NULL) + +nodeconfigdir=$(libconfigdir)/node.d 
+dist_nodeconfig_DATA = \ + $(NULL) + +dist_node_DATA = \ + named/named.node.js \ + fronius/fronius.node.js \ + sma_webbox/sma_webbox.node.js \ + snmp/snmp.node.js \ + stiebeleltron/stiebeleltron.node.js \ + $(NULL) + +nodemodulesdir=$(nodedir)/node_modules +dist_nodemodules_DATA = \ + node_modules/netdata.js \ + node_modules/extend.js \ + node_modules/pixl-xml.js \ + node_modules/net-snmp.js \ + node_modules/asn1-ber.js \ + $(NULL) + +nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber +dist_nodemoduleslibber_DATA = \ + node_modules/lib/ber/index.js \ + node_modules/lib/ber/errors.js \ + node_modules/lib/ber/reader.js \ + node_modules/lib/ber/types.js \ + node_modules/lib/ber/writer.js \ + $(NULL) diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md new file mode 100644 index 0000000000..dd977017d9 --- /dev/null +++ b/collectors/node.d.plugin/README.md @@ -0,0 +1,218 @@ +# node.d.plugin + +`node.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`. + +1. It runs as an independent process `ps fax` shows it +2. It is started and stopped automatically by netdata +3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon) +4. Supports any number of data collection **modules** +5. Allows each **module** to have one or more data collection **jobs** +6. Each **job** is collecting one or more metrics from a single data source + +# Motivation + +Node.js is perfect for asynchronous operations. It is very fast and quite common (actually the whole web is based on it). +Since data collection is not a CPU intensive task, node.js is an ideal solution for it. + +`node.d.plugin` is a netdata plugin that provides an abstraction layer to allow easy and quick development of data +collectors in node.js. 
It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single +instance of node, thus lowering the memory footprint of data collection. + +Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`). +These will have to be developed using the guidelines of **[External Plugins](../plugins.d/)**. + +To run `node.js` plugins you need to have `node` installed in your system. + +In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`. +In this case the node.js package may be referred as `nodejs`. Once you install `nodejs`, we suggest to link +`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal, opens node.js. +For more information check the **[[Installation]]** guide. + +## configuring `node.d.plugin` + +`node.d.plugin` can work even without any configuration. Its default configuration file is +[/etc/netdata/node.d.conf](node.d.conf) (to edit it on your system run `/etc/netdata/edit-config node.d.conf`). + +## configuring `node.d.plugin` modules + +`node.d.plugin` modules accept configuration in `JSON` format. + +Unfortunately, `JSON` files do not accept comments. So, the best way to describe them is to have markdown text files +with instructions. + +`JSON` has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain +configuration file cannot be loaded, we suggest to verify it at [http://jsonlint.com/](http://jsonlint.com/). + +The files in this directory, provide usable examples for configuring each `node.d.plugin` module. 
+ + +## debugging modules written for node.d.plugin + +To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand, +like this: + +```sh +# become user netdata +sudo su -s /bin/sh netdata + +# run the plugin in debug mode +/usr/libexec/netdata/plugins.d/node.d.plugin debug 1 X Y Z +``` + +`node.d.plugin` will run in `debug` mode (lots of debug info), with an update frequency of `1` second, evaluating only +the collector scripts `X` (i.e. `/usr/libexec/netdata/node.d/X.node.js`), `Y` and `Z`. +You can define zero or more modules. If none is defined, `node.d.plugin` will evaluate all modules available. + +Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running `node.d.plugin`: + +```sh +export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata" +``` + +--- + +## developing `node.d.plugin` modules + +Your data collection module should be split in 3 parts: + + - a function to fetch the data from its source. `node.d.plugin` already can fetch data from web sources, + so you don't need to do anything about it for http. + + - a function to process the fetched/manipulate the data fetched. This function will make a number of calls + to create charts and dimensions and pass the collected values to netdata. + This is the only function you need to write for collecting http JSON data. + + - a `configure` and an `update` function, which take care of your module configuration and data refresh + respectively. You can use the supplied ones. + +Your module will automatically be able to process any number of servers, with different settings (even different +data collection frequencies). You will write just the work needed for one and `node.d.plugin` will do the rest. +For each server you are going to fetch data from, you will have to create a `service` (more later). 
+ +### writing the data collection module + +To provide a module called `mymodule`, you have create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure: + +```js + +// the processor is needed only +// if you need a custom processor +// other than http +netdata.processors.myprocessor = { + name: 'myprocessor', + + process: function(service, callback) { + + /* do data collection here */ + + callback(data); + } +}; + +// this is the mymodule definition +var mymodule = { + processResponse: function(service, data) { + + /* send information to the netdata server here */ + + }, + + configure: function(config) { + var eligible_services = 0; + + if(typeof(config.servers) === 'undefined' || config.servers.length === 0) { + + /* + * create a service using internal defaults; + * this is used for auto-detecting the settings + * if possible + */ + + netdata.service({ + name: 'a name for this service', + update_every: this.update_every, + module: this, + processor: netdata.processors.myprocessor, + // any other information your processor needs + }).execute(this.processResponse); + + eligible_services++; + } + else { + + /* + * create a service for each server in the + * configuration file + */ + + var len = config.servers.length; + while(len--) { + var server = config.servers[len]; + + netdata.service({ + name: server.name, + update_every: server.update_every, + module: this, + processor: netdata.processors.myprocessor, + // any other information your processor needs + }).execute(this.processResponse); + + eligible_services++; + } + } + + return eligible_services; + }, + + update: function(service, callback) { + + /* + * this function is called when each service + * created by the configure function, needs to + * collect updated values. + * + * You normally will not need to change it. 
+ */ + + service.execute(function(service, data) { + mymodule.processResponse(service, data); + callback(); + }); + }, +}; + +module.exports = mymodule; +``` + +#### configure(config) + +`configure(config)` is called just once, when `node.d.plugin` starts. +The config file will contain the contents of `/etc/netdata/node.d/mymodule.conf`. +This file should have the following format: + +```js +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ { /* server 1 */ }, { /* server 2 */ } ] +} +``` + +If the config file `/etc/netdata/node.d/mymodule.conf` does not give an `enable_autodetect` or `update_every`, these +will be added by `node.d.plugin`. So your module will always have them. + +The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever else is needed for `mymodule`. + +#### processResponse(data) + +`data` may be `null` or whatever the processor specified in the `service` returned. + +The `service` object defines a set of functions to allow you to send information to the netdata core about: + +1. Charts and dimension definitions +2. Updated values, from the collected values + +--- + +*FIXME: document an operational node.d.plugin data collector - the best example is the +[snmp collector](snmp/snmp.node.js)* diff --git a/conf.d/node.d/fronius.conf.md b/collectors/node.d.plugin/fronius/README.md similarity index 52% rename from conf.d/node.d/fronius.conf.md rename to collectors/node.d.plugin/fronius/README.md index 622086b274..dd28469905 100644 --- a/conf.d/node.d/fronius.conf.md +++ b/collectors/node.d.plugin/fronius/README.md @@ -1,3 +1,56 @@ +# fronius + +This module collects metrics from the configured solar power installation from Fronius Symo. + +**Requirements** + * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`) + * Fronius Symo with network access (http) + +It produces per server: + +1.
**Power** + * Current power input from the grid (positive values), output to the grid (negative values), in W + * Current power input from the solar panels, in W + * Current power stored in the accumulator (if present), in W (in theory, untested) + +2. **Consumption** + * Local consumption in W + +3. **Autonomy** + * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than it is needed by local consumption. + * Relative self consumption in %. The lower the better + +4. **Energy** + * The energy produced during the current day, in kWh + * The energy produced during the current year, in kWh + +5. **Inverter** + * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present. + + +### configuration + +Sample: + +```json +{ + "enable_autodetect": false, + "update_every": 5, + "servers": [ + { + "name": "Symo", + "hostname": "symo.ip.or.dns", + "update_every": 5, + "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" + } + ] +} +``` + +If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `5`. 
+ +--- + [Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m) The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M: diff --git a/node.d/fronius.node.js b/collectors/node.d.plugin/fronius/fronius.node.js similarity index 100% rename from node.d/fronius.node.js rename to collectors/node.d.plugin/fronius/fronius.node.js diff --git a/conf.d/node.d/named.conf.md b/collectors/node.d.plugin/named/README.md similarity index 100% rename from conf.d/node.d/named.conf.md rename to collectors/node.d.plugin/named/README.md diff --git a/node.d/named.node.js b/collectors/node.d.plugin/named/named.node.js similarity index 100% rename from node.d/named.node.js rename to collectors/node.d.plugin/named/named.node.js diff --git a/conf.d/node.d.conf b/collectors/node.d.plugin/node.d.conf similarity index 100% rename from conf.d/node.d.conf rename to collectors/node.d.plugin/node.d.conf diff --git a/plugins.d/node.d.plugin.in b/collectors/node.d.plugin/node.d.plugin.in similarity index 99% rename from plugins.d/node.d.plugin.in rename to collectors/node.d.plugin/node.d.plugin.in index 05c126e900..53e5302add 100755 --- a/plugins.d/node.d.plugin.in +++ b/collectors/node.d.plugin/node.d.plugin.in @@ -40,7 +40,7 @@ var util = require('util'); var http = require('http'); var path = require('path'); var extend = require('extend'); -var netdata = require('netdata'); +var netdata = require('../../../netdata'); // -------------------------------------------------------------------------------------------------------------------- diff --git a/node.d/node_modules/asn1-ber.js b/collectors/node.d.plugin/node_modules/asn1-ber.js similarity index 100% rename from node.d/node_modules/asn1-ber.js rename to collectors/node.d.plugin/node_modules/asn1-ber.js diff --git a/node.d/node_modules/extend.js b/collectors/node.d.plugin/node_modules/extend.js similarity index 100% rename from 
node.d/node_modules/extend.js rename to collectors/node.d.plugin/node_modules/extend.js diff --git a/node.d/node_modules/lib/ber/errors.js b/collectors/node.d.plugin/node_modules/lib/ber/errors.js similarity index 100% rename from node.d/node_modules/lib/ber/errors.js rename to collectors/node.d.plugin/node_modules/lib/ber/errors.js diff --git a/node.d/node_modules/lib/ber/index.js b/collectors/node.d.plugin/node_modules/lib/ber/index.js similarity index 100% rename from node.d/node_modules/lib/ber/index.js rename to collectors/node.d.plugin/node_modules/lib/ber/index.js diff --git a/node.d/node_modules/lib/ber/reader.js b/collectors/node.d.plugin/node_modules/lib/ber/reader.js similarity index 100% rename from node.d/node_modules/lib/ber/reader.js rename to collectors/node.d.plugin/node_modules/lib/ber/reader.js diff --git a/node.d/node_modules/lib/ber/types.js b/collectors/node.d.plugin/node_modules/lib/ber/types.js similarity index 100% rename from node.d/node_modules/lib/ber/types.js rename to collectors/node.d.plugin/node_modules/lib/ber/types.js diff --git a/node.d/node_modules/lib/ber/writer.js b/collectors/node.d.plugin/node_modules/lib/ber/writer.js similarity index 100% rename from node.d/node_modules/lib/ber/writer.js rename to collectors/node.d.plugin/node_modules/lib/ber/writer.js diff --git a/node.d/node_modules/net-snmp.js b/collectors/node.d.plugin/node_modules/net-snmp.js similarity index 100% rename from node.d/node_modules/net-snmp.js rename to collectors/node.d.plugin/node_modules/net-snmp.js diff --git a/node.d/node_modules/netdata.js b/collectors/node.d.plugin/node_modules/netdata.js similarity index 100% rename from node.d/node_modules/netdata.js rename to collectors/node.d.plugin/node_modules/netdata.js diff --git a/node.d/node_modules/pixl-xml.js b/collectors/node.d.plugin/node_modules/pixl-xml.js similarity index 100% rename from node.d/node_modules/pixl-xml.js rename to collectors/node.d.plugin/node_modules/pixl-xml.js diff --git 
a/conf.d/node.d/sma_webbox.conf.md b/collectors/node.d.plugin/sma_webbox/README.md similarity index 100% rename from conf.d/node.d/sma_webbox.conf.md rename to collectors/node.d.plugin/sma_webbox/README.md diff --git a/node.d/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js similarity index 100% rename from node.d/sma_webbox.node.js rename to collectors/node.d.plugin/sma_webbox/sma_webbox.node.js diff --git a/conf.d/node.d/snmp.conf.md b/collectors/node.d.plugin/snmp/README.md similarity index 100% rename from conf.d/node.d/snmp.conf.md rename to collectors/node.d.plugin/snmp/README.md diff --git a/node.d/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js similarity index 100% rename from node.d/snmp.node.js rename to collectors/node.d.plugin/snmp/snmp.node.js diff --git a/conf.d/node.d/stiebeleltron.conf.md b/collectors/node.d.plugin/stiebeleltron/README.md similarity index 91% rename from conf.d/node.d/stiebeleltron.conf.md rename to collectors/node.d.plugin/stiebeleltron/README.md index 6ae5aa1c7e..66834d9314 100644 --- a/conf.d/node.d/stiebeleltron.conf.md +++ b/collectors/node.d.plugin/stiebeleltron/README.md @@ -1,3 +1,57 @@ +# stiebel eltron + +This module collects metrics from the configured heat pump and hot water installation from Stiebel Eltron ISG web. + +**Requirements** + * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`) + * Stiebel Eltron ISG web with network access (http), without password login + +The charts are configurable, however, the provided default configuration collects the following: + +1. **General** + * Outside temperature in C + * Condenser temperature in C + * Heating circuit pressure in bar + * Flow rate in l/min + * Output of water and heat pumps in % + +2. 
**Heating** + * Heat circuit 1 temperature in C (set/actual) + * Heat circuit 2 temperature in C (set/actual) + * Flow temperature in C (set/actual) + * Buffer temperature in C (set/actual) + * Pre-flow temperature in C + +3. **Hot Water** + * Hot water temperature in C (set/actual) + +4. **Room Temperature** + * Heat circuit 1 room temperature in C (set/actual) + * Heat circuit 2 room temperature in C (set/actual) + +5. **Electric Reheating** + * Dual Mode Reheating temperature in C (hot water/heating) + +6. **Process Data** + * Remaining compressor rest time in s + +7. **Runtime** + * Compressor runtime hours (hot water/heating) + * Reheating runtime hours (reheating 1/reheating 2) + +8. **Energy** + * Compressor today in kWh (hot water/heating) + * Compressor Total in kWh (hot water/heating) + + +### configuration + +The default configuration is provided in [netdata/conf.d/node.d/stiebeleltron.conf.md](https://github.com/netdata/netdata/blob/master/conf.d/node.d/stiebeleltron.conf.md). Just change the `update_every` (if necessary) and hostnames. **You may have to adapt the configuration to suit your needs and setup** (which might be different). + +If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `10`.
+ +--- + [Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html) Original author: BrainDoctor (github) diff --git a/node.d/stiebeleltron.node.js b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js similarity index 100% rename from node.d/stiebeleltron.node.js rename to collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am new file mode 100644 index 0000000000..59250a997e --- /dev/null +++ b/collectors/plugins.d/Makefile.am @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md new file mode 100644 index 0000000000..a3ed8c5d27 --- /dev/null +++ b/collectors/plugins.d/README.md @@ -0,0 +1,347 @@ +# Netdata External Plugins + +`plugins.d` is the netdata internal plugin that collects metrics +from external processes, thus allowing netdata to use **external plugins**. + +## Provided External Plugins + +plugin|language|O/S|description +:---:|:---:|:---:|:--- +[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. +[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+. +[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points. +[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers. 
+[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`. +[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). + + +## Motivation + +This plugin allows netdata to use **external plugins** for data collection: + +1. external data collection plugins may be written in any computer language. +2. external data collection plugins may use O/S capabilities or `setuid` to + run with escalated privileges (compared to the netdata daemon). + The communication between the external plugin and netdata is unidirectional + (from the plugin to netdata), so that netdata cannot manipulate an external + plugin running with escalated privileges. + +## Operation + +Each of the external plugins is expected to run forever. +Netdata will start it when it starts and stop it when it exits. + +If the external plugin exits or crashes, netdata will log an error. +If the external plugin exits or crashes without pushing metrics to netdata, +netdata will not start it again. + +The `stdout` of external plugins is connected to netdata to receive metrics, +with the API defined below. + +The `stderr` of external plugins is connected to netdata `error.log`. + +## Configuration + +This plugin is configured via `netdata.conf`, section `[plugins]`. +At this section there a list of all the plugins found at the system it runs +with a boolean setting to enable them or not. + +Example: + +``` +[plugins] + # enable running new plugins = yes + # check for new plugins every = 60 + + # charts.d = yes + # fping = yes + # node.d = yes + # python.d = yes +``` + +The setting `enable running new plugins` changes the default behavior for all external plugins. +So if set to `no`, only the plugins that are explicitly set to `yes` will be run. 
+ +The setting `check for new plugins every` controls the time the directory `/usr/libexec/netdata/plugins.d` +will be rescanned for new plugins. So, new plugins can be added anytime. + +For each of the external plugins enabled, another `netdata.conf` section +is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin. +This section allows controlling the update frequency of the plugin and providing +additional command line arguments to it. + +For example, for `apps.plugin` the following section is available: + +``` +[plugin:apps] + # update every = 1 + # command options = +``` + +- `update every` controls the granularity of the external plugin. +- `command options` allows giving additional command line options to the plugin. + + +## External Plugins API + +Any program that can print a few values to its standard output can become a netdata external plugin. + +There are 7 lines netdata parses. Lines starting with: + +- `CHART` - create or update a chart +- `DIMENSION` - add or update a dimension to the chart just created +- `BEGIN` - initialize data collection for a chart +- `SET` - set the value of a dimension for the initialized chart +- `END` - complete data collection for the initialized chart +- `FLUSH` - ignore the last collected values +- `DISABLE` - disable this plugin + +a single program can produce any number of charts with any number of dimensions each. + +Charts can be added any time (not just the beginning). + +### command line parameters + +The plugin **MUST** accept just **one** parameter: **the number of seconds it is +expected to update the values for its charts**. The value passed by netdata +to the plugin is controlled via its configuration file (so there is no need +for the plugin to handle this configuration option). + +The external plugin can overwrite the update frequency. For example, the server may +request per second updates, but the plugin may ignore it and update its charts +every 5 seconds. 
+ +### environment variables + +There are a few environment variables that are set by `netdata` and are +available for the plugin to use. + +variable|description +:------:|:---------- +`NETDATA_USER_CONFIG_DIR`|The directory where all netdata related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`). +`NETDATA_STOCK_CONFIG_DIR`|The directory where all netdata related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`). +`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored. +`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved. +`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. +`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata. +`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. +`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain netdata debugging features. Check **[[Tracing Options]]** for more information. +`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds. + + +### the output of the plugin + +The plugin should output instructions for netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration. 
+ +#### DISABLE + +`DISABLE` will disable this plugin. This will prevent netdata from restarting the plugin. You can also exit with the value `1` to have the same effect. + +#### CHART + +`CHART` defines a new chart. + +the template is: + +> CHART type.id name title units [family [context [charttype [priority [update_every [options [plugin [module]]]]]]]] + + where: + - `type.id` + + uniquely identifies the chart, + this is what will be needed to add values to the chart + + the `type` part controls the menu the charts will appear in + + - `name` + + is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is index (and can be referred) as both `type.id` and `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it. + + - `title` + + the text above the chart + + - `units` + + the label of the vertical axis of the chart, + all dimensions added to a chart should have the same units + of measurement + + - `family` + + is used to group charts together + (for example all eth0 charts should say: eth0), + if empty or missing, the `id` part of `type.id` will be used + + this controls the sub-menu on the dashboard + + - `context` + + the context is giving the template of the chart. 
For example, if multiple charts present the same information for a different family, they should have the same `context` + + this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also apply alarms to it + + - `charttype` + + one of `line`, `area` or `stacked`, + if empty or missing, the `line` will be used + + - `priority` + + is the relative priority of the charts as rendered on the web page, + lower numbers make the charts appear before the ones with higher numbers, + if empty or missing, `1000` will be used + + - `update_every` + + overwrite the update frequency set by the server, + if empty or missing, the user configured value will be used + + - `options` + + a space separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or somehow visualize properly a less important chart), `store_first` to make netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first data collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but do not offer it on dashboards (the chart will be send to backends). `CHART` options have been added in netdata v1.7 and the `hidden` option was added in 1.10. + + - `plugin` and `module` + + both are just names that are used to let the user the plugin and its module that generated the chart. If `plugin` is unset or empty, netdata will automatically set the filename of the plugin that generated the chart. `module` has not default. 
+ + +#### DIMENSION + +`DIMENSION` defines a new dimension for the chart + +the template is: + +> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]] + + where: + + - `id` + + the `id` of this dimension (it is a text value, not numeric), + this will be needed later to add values to the dimension + + We suggest to avoid using `.` in dimension ids. Backends expect metrics to be `.` separated and people will get confused if a dimension id contains a dot. + + - `name` + + the name of the dimension as it will appear at the legend of the chart, + if empty or missing the `id` will be used + + - `algorithm` + + one of: + + * `absolute` + + the value is to drawn as-is (interpolated to second boundary), + if `algorithm` is empty, invalid or missing, `absolute` is used + + * `incremental` + + the value increases over time, + the difference from the last value is presented in the chart, + the server interpolates the value and calculates a per second figure + + * `percentage-of-absolute-row` + + the % of this value compared to the total of all dimensions + + * `percentage-of-incremental-row` + + the % of this value compared to the incremental total of + all dimensions + + - `multiplier` + + an integer value to multiply the collected value, + if empty or missing, `1` is used + + - `divisor` + + an integer value to divide the collected value, + if empty or missing, `1` is used + + - `hidden` + + giving the keyword `hidden` will make this dimension hidden, + it will take part in the calculations but will not be presented in the chart + + +#### VARIABLE + +> VARIABLE [SCOPE] name = value + +`VARIABLE` defines a variable that can be used in alarms. This is to used for setting constants (like the max connections a server may accept). + +Variables support 2 scopes: + +- `GLOBAL` or `HOST` to define the variable at the host level. +- `LOCAL` or `CHART` to define the variable at the chart level. 
Use chart-local variables when the same variable may exist for different charts (i.e. netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is the ideal to build alarm templates. + +The position of the `VARIABLE` line, sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope. + +These variables can be set and updated at any point. + +Variable names should use alphanumeric characters, the `.` and the `_`. + +The `value` is floating point (netdata used `long double`). + +Variables are transferred to upstream netdata servers (streaming and database replication). + +## data collection + +data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines + +> BEGIN type.id [microseconds] + + - `type.id` + + is the unique identification of the chart (as given in `CHART`) + + - `microseconds` + + is the number of microseconds since the last update of the chart. It is optional. + + Under heavy system load, the system may have some latency transferring + data from the plugins to netdata via the pipe. This number improves + accuracy significantly, since the plugin is able to calculate the + duration between its iterations better than netdata. + + The first time the plugin is started, no microseconds should be given + to netdata. + +> SET id = value + + - `id` + + is the unique identification of the dimension (of the chart just began) + + - `value` + + is the collected value, only integer values are collected. If you want to push fractional values, multiply this value by 100 or 1000 and set the `DIMENSION` divider to 1000. + +> END + + END does not take any parameters, it commits the collected values for all dimensions to the chart. 
If a dimension was not `SET`, its value will be empty for this commit. + +More `SET` lines may appear to update all the dimensions of the chart. +All of them in one `BEGIN` -> `END` block. + +All `SET` lines within a single `BEGIN` -> `END` block have to refer to the +same chart. + +If more charts need to be updated, each chart should have its own +`BEGIN` -> `SET` -> `END` block. + +If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it, +it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore +all the values collected since the last `BEGIN` command. + +If a plugin does not behave properly (outputs invalid lines, or does not +follow these guidelines), it will be disabled by netdata. + +### collected values + +netdata will collect any **signed** value in the 64bit range: +`-9.223.372.036.854.775.808` to `+9.223.372.036.854.775.807` diff --git a/src/plugins/plugins.d.plugin/plugins_d.c b/collectors/plugins.d/plugins_d.c similarity index 100% rename from src/plugins/plugins.d.plugin/plugins_d.c rename to collectors/plugins.d/plugins_d.c diff --git a/src/plugins/plugins.d.plugin/plugins_d.h b/collectors/plugins.d/plugins_d.h similarity index 98% rename from src/plugins/plugins.d.plugin/plugins_d.h rename to collectors/plugins.d/plugins_d.h index 57c2e232ca..adccf3f0fb 100644 --- a/src/plugins/plugins.d.plugin/plugins_d.h +++ b/collectors/plugins.d/plugins_d.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGINS_D_H #define NETDATA_PLUGINS_D_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #define NETDATA_PLUGIN_HOOK_PLUGINSD \ { \ diff --git a/collectors/proc.plugin/Makefile.am b/collectors/proc.plugin/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/collectors/proc.plugin/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git 
a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md new file mode 100644 index 0000000000..4130e7ab5f --- /dev/null +++ b/collectors/proc.plugin/README.md @@ -0,0 +1,200 @@ + +# proc.plugin + + - `/proc/net/dev` (all network interfaces for all their values) + - `/proc/diskstats` (all disks for all their values) + - `/proc/net/snmp` (total IPv4, TCP and UDP usage) + - `/proc/net/snmp6` (total IPv6 usage) + - `/proc/net/netstat` (more IPv4 usage) + - `/proc/net/stat/nf_conntrack` (connection tracking performance) + - `/proc/net/stat/synproxy` (synproxy performance) + - `/proc/net/ip_vs/stats` (IPVS connection statistics) + - `/proc/stat` (CPU utilization) + - `/proc/meminfo` (memory information) + - `/proc/vmstat` (system performance) + - `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers) + - `/sys/fs/cgroup` (Control Groups - Linux Containers) + - `/proc/self/mountinfo` (mount points) + - `/proc/interrupts` (total and per core hardware interrupts) + - `/proc/softirqs` (total and per core software interrupts) + - `/proc/loadavg` (system load and total processes running) + - `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography) + - `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`). + - `netdata` (internal netdata resources utilization) + + +--- + +# Monitoring Disks' Performance with netdata + +> Live demo of disk monitoring at: **[http://london.netdata.rocks](http://london.netdata.rocks/#disk)** + +Performance monitoring for Linux disks is quite complicated. The main reason is the plethora of disk technologies available. There are many different hardware disk technologies, but there are even more **virtual disk** technologies that can provide additional storage features. + +Fortunately, the Linux kernel provides many metrics that can provide deep insights of what our disks are doing. 
The kernel measures all these metrics on all layers of storage: **virtual disks**, **physical disks** and **partitions of disks**. + +Let's see the list of metrics provided by netdata for each of the above: + +### I/O bandwidth/s (kb/s) + +The amount of data transferred from and to the disk. + +### I/O operations/s + +The number of I/O operations completed. + +### Queued I/O operations + +The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue. + +### Backlog size (time in ms) + +The expected duration of the currently queued I/O operations. + +### Utilization (time percentage) + +The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, that execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is 100% of the available time busy, has no time to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached. + +Of course, for newer disk technologies (like fusion cards) that are capable to execute multiple commands in parallel, this metric is just meaningless. + +### Average I/O operation time (ms) + +The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them. + +### Average I/O operation size (kb) + +The average amount of data of the completed I/O operations. + +### Average Service Time (ms) + +The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations the reporting average service time will be misleading. + +### Merged I/O operations/s + +The Linux kernel is capable of merging I/O operations. 
So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them to one before giving them to disk. This metric measures the number of operations that have been merged by the Linux kernel. + +### Total I/O time + +The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel. + +### Space usage + +For mounted disks, netdata will provide a chart for their space, with 3 dimensions: + +1. free +2. used +3. reserved for root + +### inode usage + +For mounted disks, netdata will provide a chart for their inodes (number of file and directories), with 3 dimensions: + +1. free +2. used +3. reserved for root + +--- + +## disk names + +netdata will automatically set the name of disks on the dashboard, from the mount point they are mounted, of course only when they are mounted. Changes in mount points are not currently detected (you will have to restart netdata to change the name of the disk). + +--- + +## performance metrics + +By default netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). + +netdata categorizes all block devices in 3 categories: + +1. physical disks (i.e. block devices that does not have slaves and are not partitions) +2. virtual disks (i.e. block devices that have slaves - like RAID devices) +3. disk partitions (i.e. block devices that are part of a physical disk) + +Performance metrics are enabled by default for all disk devices, except partitions and not-mounted virtual disks. Of course, you can enable/disable monitoring any block device by editing the netdata configuration file. 
+ +### netdata configuration + +You can get the running netdata configuration using this: + +```sh +cd /etc/netdata +curl "http://localhost:19999/netdata.conf" >netdata.conf.new +mv netdata.conf.new netdata.conf +``` + +Then edit `netdata.conf` and find the following section. This is the basic plugin configuration. + +``` +[plugin:proc:/proc/diskstats] + # enable new disks detected at runtime = yes + # performance metrics for physical disks = auto + # performance metrics for virtual disks = no + # performance metrics for partitions = no + # performance metrics for mounted filesystems = no + # performance metrics for mounted virtual disks = auto + # space metrics for mounted filesystems = auto + # bandwidth for all disks = auto + # operations for all disks = auto + # merged operations for all disks = auto + # i/o time for all disks = auto + # queued operations for all disks = auto + # utilization percentage for all disks = auto + # backlog for all disks = auto + # space usage for all disks = auto + # inodes usage for all disks = auto + # filename to monitor = /proc/diskstats + # path to get block device infos = /sys/dev/block/%lu:%lu/%s + # path to get h/w sector size = /sys/block/%s/queue/hw_sector_size + # path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue +/hw_sector_size + +``` + +For each virtual disk, physical disk and partition you will have a section like this: + +``` +[plugin:proc:/proc/diskstats:sda] + # enable = yes + # enable performance metrics = auto + # bandwidth = auto + # operations = auto + # merged operations = auto + # i/o time = auto + # queued operations = auto + # utilization percentage = auto + # backlog = auto +``` + +For all configuration options: +- `auto` = enable monitoring if the collected values are not zero +- `yes` = enable monitoring +- `no` = disable monitoring + +Of course, to set options, you will have to uncomment them. The comments show the internal defaults. 
+ +After saving `/etc/netdata/netdata.conf`, restart your netdata to apply them. + +#### Disabling performance metrics for individual device and to multiple devices by device type +You can pretty easy disable performance metrics for individual device, for ex.: +``` +[plugin:proc:/proc/diskstats:sda] + enable performance metrics = no +``` +But sometimes you need disable performance metrics for all devices with the same type, to do it you need to figure out device type from `/proc/diskstats` for ex.: +``` + 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168 + 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880 + 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828 + 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104 +``` +All zram devices starts with `251` number and all loop devices starts with `7`. +So, to disable performance metrics for all loop devices you could add `performance metrics for disks with major 7 = no` to `[plugin:proc:/proc/diskstats]` section. 
+``` +[plugin:proc:/proc/diskstats] + performance metrics for disks with major 7 = no +``` + diff --git a/src/plugins/linux-proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c similarity index 100% rename from src/plugins/linux-proc.plugin/ipc.c rename to collectors/proc.plugin/ipc.c diff --git a/src/plugins/linux-proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c similarity index 100% rename from src/plugins/linux-proc.plugin/plugin_proc.c rename to collectors/proc.plugin/plugin_proc.c diff --git a/src/plugins/linux-proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h similarity index 98% rename from src/plugins/linux-proc.plugin/plugin_proc.h rename to collectors/proc.plugin/plugin_proc.h index 4b544f1e91..bfefe1ad4e 100644 --- a/src/plugins/linux-proc.plugin/plugin_proc.h +++ b/collectors/proc.plugin/plugin_proc.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_PROC_H #define NETDATA_PLUGIN_PROC_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #if (TARGET_OS == OS_LINUX) diff --git a/src/plugins/linux-proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_diskstats.c rename to collectors/proc.plugin/proc_diskstats.c diff --git a/src/plugins/linux-proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_interrupts.c rename to collectors/proc.plugin/proc_interrupts.c diff --git a/src/plugins/linux-proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_loadavg.c rename to collectors/proc.plugin/proc_loadavg.c diff --git a/src/plugins/linux-proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_meminfo.c rename to collectors/proc.plugin/proc_meminfo.c diff --git a/src/plugins/linux-proc.plugin/proc_net_dev.c 
b/collectors/proc.plugin/proc_net_dev.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_dev.c rename to collectors/proc.plugin/proc_net_dev.c diff --git a/src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c b/collectors/proc.plugin/proc_net_ip_vs_stats.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c rename to collectors/proc.plugin/proc_net_ip_vs_stats.c diff --git a/src/plugins/linux-proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_netstat.c rename to collectors/proc.plugin/proc_net_netstat.c diff --git a/src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c b/collectors/proc.plugin/proc_net_rpc_nfs.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c rename to collectors/proc.plugin/proc_net_rpc_nfs.c diff --git a/src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c rename to collectors/proc.plugin/proc_net_rpc_nfsd.c diff --git a/src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c rename to collectors/proc.plugin/proc_net_sctp_snmp.c diff --git a/src/plugins/linux-proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_snmp.c rename to collectors/proc.plugin/proc_net_snmp.c diff --git a/src/plugins/linux-proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_snmp6.c rename to collectors/proc.plugin/proc_net_snmp6.c diff --git a/src/plugins/linux-proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c similarity index 100% rename 
from src/plugins/linux-proc.plugin/proc_net_sockstat.c rename to collectors/proc.plugin/proc_net_sockstat.c diff --git a/src/plugins/linux-proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_sockstat6.c rename to collectors/proc.plugin/proc_net_sockstat6.c diff --git a/src/plugins/linux-proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_softnet_stat.c rename to collectors/proc.plugin/proc_net_softnet_stat.c diff --git a/src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c b/collectors/proc.plugin/proc_net_stat_conntrack.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c rename to collectors/proc.plugin/proc_net_stat_conntrack.c diff --git a/src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c rename to collectors/proc.plugin/proc_net_stat_synproxy.c diff --git a/src/plugins/linux-proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_self_mountinfo.c rename to collectors/proc.plugin/proc_self_mountinfo.c diff --git a/src/plugins/linux-proc.plugin/proc_self_mountinfo.h b/collectors/proc.plugin/proc_self_mountinfo.h similarity index 100% rename from src/plugins/linux-proc.plugin/proc_self_mountinfo.h rename to collectors/proc.plugin/proc_self_mountinfo.h diff --git a/src/plugins/linux-proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_softirqs.c rename to collectors/proc.plugin/proc_softirqs.c diff --git a/src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c 
similarity index 100% rename from src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c rename to collectors/proc.plugin/proc_spl_kstat_zfs.c diff --git a/src/plugins/linux-proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_stat.c rename to collectors/proc.plugin/proc_stat.c diff --git a/src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c rename to collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c diff --git a/src/plugins/linux-proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_uptime.c rename to collectors/proc.plugin/proc_uptime.c diff --git a/src/plugins/linux-proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c similarity index 100% rename from src/plugins/linux-proc.plugin/proc_vmstat.c rename to collectors/proc.plugin/proc_vmstat.c diff --git a/src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c similarity index 100% rename from src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c rename to collectors/proc.plugin/sys_devices_system_edac_mc.c diff --git a/src/plugins/linux-proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c similarity index 100% rename from src/plugins/linux-proc.plugin/sys_devices_system_node.c rename to collectors/proc.plugin/sys_devices_system_node.c diff --git a/src/plugins/linux-proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c similarity index 100% rename from src/plugins/linux-proc.plugin/sys_fs_btrfs.c rename to collectors/proc.plugin/sys_fs_btrfs.c diff --git a/src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c 
similarity index 100% rename from src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c rename to collectors/proc.plugin/sys_kernel_mm_ksm.c diff --git a/src/plugins/linux-proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c similarity index 100% rename from src/plugins/linux-proc.plugin/zfs_common.c rename to collectors/proc.plugin/zfs_common.c diff --git a/src/plugins/linux-proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h similarity index 99% rename from src/plugins/linux-proc.plugin/zfs_common.h rename to collectors/proc.plugin/zfs_common.h index 02b55d8c3a..fab54f59af 100644 --- a/src/plugins/linux-proc.plugin/zfs_common.h +++ b/collectors/proc.plugin/zfs_common.h @@ -3,7 +3,7 @@ #ifndef NETDATA_ZFS_COMMON_H #define NETDATA_ZFS_COMMON_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #define ZFS_FAMILY_SIZE "size" #define ZFS_FAMILY_EFFICIENCY "efficiency" diff --git a/python.d/Makefile.am b/collectors/python.d.plugin/Makefile.am similarity index 58% rename from python.d/Makefile.am rename to collectors/python.d.plugin/Makefile.am index 696cdc5170..f319acf9c0 100644 --- a/python.d/Makefile.am +++ b/collectors/python.d.plugin/Makefile.am @@ -1,78 +1,156 @@ # SPDX-License-Identifier: GPL-3.0-or-later -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in CLEANFILES = \ - $(NULL) + python.d.plugin \ + $(NULL) include $(top_srcdir)/build/subst.inc - SUFFIXES = .in +dist_libconfig_DATA = \ + python.d.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + python.d.plugin \ + $(NULL) + +dist_noinst_DATA = \ + python.d.plugin.in \ + README.md \ + $(NULL) + +pythonconfigdir=$(libconfigdir)/python.d +dist_pythonconfig_DATA = \ + apache/apache.conf \ + beanstalk/beanstalk.conf \ + bind_rndc/bind_rndc.conf \ + boinc/boinc.conf \ + ceph/ceph.conf \ + chrony/chrony.conf \ + couchdb/couchdb.conf \ + cpuidle/cpuidle.conf \ + cpufreq/cpufreq.conf \ + dns_query_time/dns_query_time.conf \ + dnsdist/dnsdist.conf \ + 
dockerd/dockerd.conf \ + dovecot/dovecot.conf \ + elasticsearch/elasticsearch.conf \ + example/example.conf \ + exim/exim.conf \ + fail2ban/fail2ban.conf \ + freeradius/freeradius.conf \ + go_expvar/go_expvar.conf \ + haproxy/haproxy.conf \ + hddtemp/hddtemp.conf \ + httpcheck/httpcheck.conf \ + icecast/icecast.conf \ + ipfs/ipfs.conf \ + isc_dhcpd/isc_dhcpd.conf \ + linux_power_supply/linux_power_supply.conf \ + litespeed/litespeed.conf \ + logind/logind.conf \ + mdstat/mdstat.conf \ + megacli/megacli.conf \ + memcached/memcached.conf \ + mongodb/mongodb.conf \ + monit/monit.conf \ + mysql/mysql.conf \ + nginx/nginx.conf \ + nginx_plus/nginx_plus.conf \ + nsd/nsd.conf \ + ntpd/ntpd.conf \ + ovpn_status_log/ovpn_status_log.conf \ + phpfpm/phpfpm.conf \ + portcheck/portcheck.conf \ + postfix/postfix.conf \ + postgres/postgres.conf \ + powerdns/powerdns.conf \ + puppet/puppet.conf \ + rabbitmq/rabbitmq.conf \ + redis/redis.conf \ + rethinkdbs/rethinkdbs.conf \ + retroshare/retroshare.conf \ + samba/samba.conf \ + sensors/sensors.conf \ + springboot/springboot.conf \ + spigotmc/spigotmc.conf \ + squid/squid.conf \ + smartd_log/smartd_log.conf \ + tomcat/tomcat.conf \ + traefik/traefik.conf \ + unbound/unbound.conf \ + varnish/varnish.conf \ + w1sensor/w1sensor.conf \ + web_log/web_log.conf \ + $(NULL) + dist_python_SCRIPTS = \ $(NULL) dist_python_DATA = \ - README.md \ - apache.chart.py \ - beanstalk.chart.py \ - bind_rndc.chart.py \ - boinc.chart.py \ - ceph.chart.py \ - chrony.chart.py \ - couchdb.chart.py \ - cpufreq.chart.py \ - cpuidle.chart.py \ - dns_query_time.chart.py \ - dnsdist.chart.py \ - dockerd.chart.py \ - dovecot.chart.py \ - elasticsearch.chart.py \ - example.chart.py \ - exim.chart.py \ - fail2ban.chart.py \ - freeradius.chart.py \ - go_expvar.chart.py \ - haproxy.chart.py \ - hddtemp.chart.py \ - httpcheck.chart.py \ - icecast.chart.py \ - ipfs.chart.py \ - isc_dhcpd.chart.py \ - linux_power_supply.chart.py \ - litespeed.chart.py \ - 
logind.chart.py \ - mdstat.chart.py \ - megacli.chart.py \ - memcached.chart.py \ - mongodb.chart.py \ - monit.chart.py \ - mysql.chart.py \ - nginx.chart.py \ - nginx_plus.chart.py \ - nsd.chart.py \ - ntpd.chart.py \ - ovpn_status_log.chart.py \ - phpfpm.chart.py \ - portcheck.chart.py \ - postfix.chart.py \ - postgres.chart.py \ - powerdns.chart.py \ - puppet.chart.py \ - rabbitmq.chart.py \ - redis.chart.py \ - rethinkdbs.chart.py \ - retroshare.chart.py \ - samba.chart.py \ - sensors.chart.py \ - spigotmc.chart.py \ - springboot.chart.py \ - squid.chart.py \ - smartd_log.chart.py \ - tomcat.chart.py \ - traefik.chart.py \ - unbound.chart.py \ - varnish.chart.py \ - w1sensor.chart.py \ - web_log.chart.py \ + apache/apache.chart.py \ + beanstalk/beanstalk.chart.py \ + bind_rndc/bind_rndc.chart.py \ + boinc/boinc.chart.py \ + ceph/ceph.chart.py \ + chrony/chrony.chart.py \ + couchdb/couchdb.chart.py \ + cpufreq/cpufreq.chart.py \ + cpuidle/cpuidle.chart.py \ + dns_query_time/dns_query_time.chart.py \ + dnsdist/dnsdist.chart.py \ + dockerd/dockerd.chart.py \ + dovecot/dovecot.chart.py \ + elasticsearch/elasticsearch.chart.py \ + example/example.chart.py \ + exim/exim.chart.py \ + fail2ban/fail2ban.chart.py \ + freeradius/freeradius.chart.py \ + go_expvar/go_expvar.chart.py \ + haproxy/haproxy.chart.py \ + hddtemp/hddtemp.chart.py \ + httpcheck/httpcheck.chart.py \ + icecast/icecast.chart.py \ + ipfs/ipfs.chart.py \ + isc_dhcpd/isc_dhcpd.chart.py \ + linux_power_supply/linux_power_supply.chart.py \ + litespeed/litespeed.chart.py \ + logind/logind.chart.py \ + mdstat/mdstat.chart.py \ + megacli/megacli.chart.py \ + memcached/memcached.chart.py \ + mongodb/mongodb.chart.py \ + monit/monit.chart.py \ + mysql/mysql.chart.py \ + nginx/nginx.chart.py \ + nginx_plus/nginx_plus.chart.py \ + nsd/nsd.chart.py \ + ntpd/ntpd.chart.py \ + ovpn_status_log/ovpn_status_log.chart.py \ + phpfpm/phpfpm.chart.py \ + portcheck/portcheck.chart.py \ + postfix/postfix.chart.py \ + 
postgres/postgres.chart.py \ + powerdns/powerdns.chart.py \ + puppet/puppet.chart.py \ + rabbitmq/rabbitmq.chart.py \ + redis/redis.chart.py \ + rethinkdbs/rethinkdbs.chart.py \ + retroshare/retroshare.chart.py \ + samba/samba.chart.py \ + sensors/sensors.chart.py \ + spigotmc/spigotmc.chart.py \ + springboot/springboot.chart.py \ + squid/squid.chart.py \ + smartd_log/smartd_log.chart.py \ + tomcat/tomcat.chart.py \ + traefik/traefik.chart.py \ + unbound/unbound.chart.py \ + varnish/varnish.chart.py \ + w1sensor/w1sensor.chart.py \ + web_log/web_log.chart.py \ $(NULL) pythonmodulesdir=$(pythondir)/python_modules diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md new file mode 100644 index 0000000000..df24cd18fa --- /dev/null +++ b/collectors/python.d.plugin/README.md @@ -0,0 +1,198 @@ +# python.d.plugin + +`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`. + +1. It runs as an independent process `ps fax` shows it +2. It is started and stopped automatically by netdata +3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon) +4. Supports any number of data collection **modules** +5. Allows each **module** to have one or more data collection **jobs** +6. Each **job** is collecting one or more metrics from a single data source + + +## Disclaimer + +Every module should be compatible with python2 and python3. +All third party libraries should be installed system-wide or in `python_modules` directory. +Module configurations are written in YAML and **pyYAML is required**. + +Every configuration file must have one of two formats: + +- Configuration for only one job: + +```yaml +update_every : 2 # update frequency +retries : 1 # how many failures in update() is tolerated +priority : 20000 # where it is shown on dashboard + +other_var1 : bla # variables passed to module +other_var2 : alb +``` + +- Configuration for many jobs (ex. 
mysql): + +```yaml +# module defaults: +update_every : 2 +retries : 1 +priority : 20000 + +local: # job name + update_every : 5 # job update frequency + other_var1 : some_val # module specific variable + +other_job: + priority : 5 # job position on dashboard + retries : 20 # job retries + other_var2 : val # module specific variable +``` + +`update_every`, `retries`, and `priority` are always optional. + +--- + +## How to write a new module + +Writing new python module is simple. You just need to remember to include 5 major things: +- **ORDER** global list +- **CHART** global dictionary +- **Service** class +- **_get_data** method +- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**) + +If you plan to submit the module in a PR, make sure and go through the [PR checklist for new modules](https://github.com/netdata/netdata/wiki/New-Module-PR-Checklist) beforehand to make sure you have updated all the files you need to. + +### Global variables `ORDER` and `CHART` + +`ORDER` list should contain the order of chart ids. Example: +```py +ORDER = ['first_chart', 'second_chart', 'third_chart'] +``` + +`CHART` dictionary is a little bit trickier. It should contain the chart definition in following format: +```py +CHART = { + id: { + 'options': [name, title, units, family, context, charttype], + 'lines': [ + [unique_dimension_name, name, algorithm, multiplier, divisor] + ]} +``` + +All names are better explained in the [External Plugins](../) section. +Parameters like `priority` and `update_every` are handled by `python.d.plugin`. + +### `Service` class + +Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes: + +- `SimpleService` +- `UrlService` +- `SocketService` +- `LogService` +- `ExecutableService` + +Also it needs to invoke the parent class constructor in a specific way as well as assign global variables to class variables. 
+ +Simple example: +```py +from base import UrlService +class Service(UrlService): + def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + self.order = ORDER + self.definitions = CHARTS +``` + +### `_get_data` collector/parser + +This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary where keys are unique dimension names or `None` if no data is collected. + +Example: +```py +def _get_data(self): + try: + raw = self._get_raw_data().split(" ") + return {'active': int(raw[2])} + except (ValueError, AttributeError): + return None +``` + +More about framework classes +============================ + +Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor. + +If module needs some additional user-configurable variable, it can be accessed from the `self.configuration` list and assigned in constructor or custom `check` method. Example: +```py +def __init__(self, configuration=None, name=None): + UrlService.__init__(self, configuration=configuration, name=name) + try: + self.baseurl = str(self.configuration['baseurl']) + except (KeyError, TypeError): + self.baseurl = "http://localhost:5001" +``` + +Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings. + +### `SimpleService` + +_This is last resort class, if a new module cannot be written by using other framework class this one can be used._ + +_Example: `mysql`, `sensors`_ + +It is the lowest-level class which implements most of module logic, like: +- threading +- handling run times +- chart formatting +- logging +- chart creation and updating + +### `LogService` + +_Examples: `apache_cache`, `nginx_log`_ + +_Variable from config file_: `log_path`. 
+ +Object created from this class reads new lines from file specified in `log_path` variable. It will check if file exists and is readable. Also `_get_raw_data` returns list of strings where each string is one line from file specified in `log_path`. + +### `ExecutableService` + +_Examples: `exim`, `postfix`_ + +_Variable from config file_: `command`. + +This allows to execute a shell command in a secure way. It will check for invalid characters in `command` variable and won't proceed if there is one of: +- '&' +- '|' +- ';' +- '>' +- '<' + +For additional security it uses python `subprocess.Popen` (without `shell=True` option) to execute command. Command can be specified with absolute or relative name. When using relative name, it will try to find `command` in `PATH` environment variable as well as in `/sbin` and `/usr/sbin`. + +`_get_raw_data` returns list of decoded lines returned by `command`. + +### UrlService + +_Examples: `apache`, `nginx`, `tomcat`_ + +_Variables from config file_: `url`, `user`, `pass`. + +If data is grabbed by accessing service via HTTP protocol, this class can be used. It can handle HTTP Basic Auth when specified with `user` and `pass` credentials. + +`_get_raw_data` returns list of utf-8 decoded strings (lines). + +### SocketService + +_Examples: `dovecot`, `redis`_ + +_Variables from config file_: `unix_socket`, `host`, `port`, `request`. + +Object will try execute `request` using either `unix_socket` or TCP/IP socket with combination of `host` and `port`. This can access unix sockets with SOCK_STREAM or SOCK_DGRAM protocols and TCP/IP sockets in version 4 and 6 with SOCK_STREAM setting. + +Sockets are accessed in non-blocking mode with 15 second timeout. + +After every execution of `_get_raw_data` socket is closed, to prevent this module needs to set `_keep_alive` variable to `True` and implement custom `_check_raw_data` method. 
+ +`_check_raw_data` should take raw data and return `True` if all data is received otherwise it should return `False`. Also it should do it in fast and efficient way. \ No newline at end of file diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md new file mode 100644 index 0000000000..c6d1d126a7 --- /dev/null +++ b/collectors/python.d.plugin/apache/README.md @@ -0,0 +1,59 @@ +# apache + +This module will monitor one or more Apache servers depending on configuration. + +**Requirements:** + * apache with enabled `mod_status` + +It produces the following charts: + +1. **Requests** in requests/s + * requests + +2. **Connections** + * connections + +3. **Async Connections** + * keepalive + * closing + * writing + +4. **Bandwidth** in kilobytes/s + * sent + +5. **Workers** + * idle + * busy + +6. **Lifetime Avg. Requests/s** in requests/s + * requests_sec + +7. **Lifetime Avg. Bandwidth/s** in kilobytes/s + * size_sec + +8. **Lifetime Avg. Response Size** in bytes/request + * size_req + +### configuration + +Needs only `url` to server's `server-status?auto` + +Here is an example for 2 servers: + +```yaml +update_every : 10 +priority : 90100 + +local: + url : 'http://localhost/server-status?auto' + retries : 20 + +remote: + url : 'http://www.apache.org/server-status?auto' + update_every : 5 + retries : 4 +``` + +Without configuration, module attempts to connect to `http://localhost/server-status?auto` + +--- diff --git a/python.d/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py similarity index 100% rename from python.d/apache.chart.py rename to collectors/python.d.plugin/apache/apache.chart.py diff --git a/conf.d/python.d/apache.conf b/collectors/python.d.plugin/apache/apache.conf similarity index 100% rename from conf.d/python.d/apache.conf rename to collectors/python.d.plugin/apache/apache.conf diff --git a/collectors/python.d.plugin/beanstalk/README.md 
b/collectors/python.d.plugin/beanstalk/README.md new file mode 100644 index 0000000000..c2d7d57872 --- /dev/null +++ b/collectors/python.d.plugin/beanstalk/README.md @@ -0,0 +1,103 @@ +# beanstalk + +Module provides server and tube-level statistics: + +**Requirements:** + * `python-beanstalkc` + +**Server statistics:** + +1. **Cpu usage** in cpu time + * user + * system + +2. **Jobs rate** in jobs/s + * total + * timeouts + +3. **Connections rate** in connections/s + * connections + +4. **Commands rate** in commands/s + * put + * peek + * peek-ready + * peek-delayed + * peek-buried + * reserve + * use + * watch + * ignore + * delete + * release + * bury + * kick + * stats + * stats-job + * stats-tube + * list-tubes + * list-tube-used + * list-tubes-watched + * pause-tube + +5. **Current tubes** in tubes + * tubes + +6. **Current jobs** in jobs + * urgent + * ready + * reserved + * delayed + * buried + +7. **Current connections** in connections + * written + * producers + * workers + * waiting + +8. **Binlog** in records/s + * written + * migrated + +9. **Uptime** in seconds + * uptime + +**Per tube statistics:** + +1. **Jobs rate** in jobs/s + * jobs + +2. **Jobs** in jobs + * using + * ready + * reserved + * delayed + * buried + +3. **Connections** in connections + * using + * waiting + * watching + +4. **Commands** in commands/s + * deletes + * pauses + +5. 
**Pause** in seconds + * since + * left + + +### configuration + +Sample: + +```yaml +host : '127.0.0.1' +port : 11300 +``` + +If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address + +--- diff --git a/python.d/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py similarity index 100% rename from python.d/beanstalk.chart.py rename to collectors/python.d.plugin/beanstalk/beanstalk.chart.py diff --git a/conf.d/python.d/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf similarity index 100% rename from conf.d/python.d/beanstalk.conf rename to collectors/python.d.plugin/beanstalk/beanstalk.conf diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md new file mode 100644 index 0000000000..688297ab3b --- /dev/null +++ b/collectors/python.d.plugin/bind_rndc/README.md @@ -0,0 +1,60 @@ +# bind_rndc + +Module parses bind dump file to collect real-time performance metrics + +**Requirements:** + * Version of bind must be 9.6 + + * Netdata must have permissions to run `rndc stats` + +It produces: + +1. **Name server statistics** + * requests + * responses + * success + * auth_answer + * nonauth_answer + * nxrrset + * failure + * nxdomain + * recursion + * duplicate + * rejections + +2. **Incoming queries** + * RESERVED0 + * A + * NS + * CNAME + * SOA + * PTR + * MX + * TXT + * X25 + * AAAA + * SRV + * NAPTR + * A6 + * DS + * RSIG + * DNSKEY + * SPF + * ANY + * DLV + +3. 
**Outgoing queries** + * Same as Incoming queries + + +### configuration + +Sample: + +```yaml +local: + named_stats_path : '/var/log/bind/named.stats' +``` + +If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats` + +--- diff --git a/python.d/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py similarity index 100% rename from python.d/bind_rndc.chart.py rename to collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py diff --git a/conf.d/python.d/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf similarity index 100% rename from conf.d/python.d/bind_rndc.conf rename to collectors/python.d.plugin/bind_rndc/bind_rndc.conf diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md new file mode 100644 index 0000000000..595bcd3c0b --- /dev/null +++ b/collectors/python.d.plugin/boinc/README.md @@ -0,0 +1,28 @@ +# boinc + +This module monitors task counts for the Berkeley Open Infrastructure for +Network Computing (BOINC) distributed computing client using the same +RPC interface that the BOINC monitoring GUI does. + +It provides charts tracking the total number of tasks and active tasks, +as well as ones tracking each of the possible states for tasks. + +### configuration + +BOINC requires use of a password to access its RPC interface. You can +find this password in the `gui_rpc_auth.cfg` file in your BOINC directory. + +By default, the module will try to auto-detect the password by looking +in `/var/lib/boinc` for this file (this is the location most Linux +distributions use for a system-wide BOINC installation), so things may +just work without needing configuration for the local system. 
+ +You can monitor remote systems as well: + +```yaml +remote: + hostname: some-host + password: some-password +``` + +--- diff --git a/python.d/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py similarity index 100% rename from python.d/boinc.chart.py rename to collectors/python.d.plugin/boinc/boinc.chart.py diff --git a/conf.d/python.d/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf similarity index 100% rename from conf.d/python.d/boinc.conf rename to collectors/python.d.plugin/boinc/boinc.conf diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md new file mode 100644 index 0000000000..29dfe5d1d1 --- /dev/null +++ b/collectors/python.d.plugin/ceph/README.md @@ -0,0 +1,32 @@ +# ceph + +This module monitors the ceph cluster usage and consumption data of a server. + +It produces: + +* Cluster statistics (usage, available, latency, objects, read/write rate) +* OSD usage +* OSD latency +* Pool usage +* Pool read/write operations +* Pool read/write rate +* number of objects per pool + +**Requirements:** + +- `rados` python module +- Granting read permissions to ceph group from keyring file +```shell +# chmod 640 /etc/ceph/ceph.client.admin.keyring +``` + +### Configuration + +Sample: +```yaml +local: + config_file: '/etc/ceph/ceph.conf' + keyring_file: '/etc/ceph/ceph.client.admin.keyring' +``` + +--- diff --git a/python.d/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py similarity index 100% rename from python.d/ceph.chart.py rename to collectors/python.d.plugin/ceph/ceph.chart.py diff --git a/conf.d/python.d/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf similarity index 100% rename from conf.d/python.d/ceph.conf rename to collectors/python.d.plugin/ceph/ceph.conf diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md new file mode 100644 index 0000000000..30636fe772 --- /dev/null +++ b/collectors/python.d.plugin/chrony/README.md @@ 
-0,0 +1,31 @@ +# chrony + +This module monitors the precision and statistics of a local chronyd server. + +It produces: + +* frequency +* last offset +* RMS offset +* residual freq +* root delay +* root dispersion +* skew +* system time + +**Requirements:** +Verify that user netdata can execute `chronyc tracking`. If necessary, update `/etc/chrony.conf`, `cmdallow`. + +### Configuration + +Sample: +```yaml +# data collection frequency: +update_every: 1 + +# chrony query command: +local: + command: 'chronyc -n tracking' +``` + +--- diff --git a/python.d/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py similarity index 100% rename from python.d/chrony.chart.py rename to collectors/python.d.plugin/chrony/chrony.chart.py diff --git a/conf.d/python.d/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf similarity index 100% rename from conf.d/python.d/chrony.conf rename to collectors/python.d.plugin/chrony/chrony.conf diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md new file mode 100644 index 0000000000..eff8c08103 --- /dev/null +++ b/collectors/python.d.plugin/couchdb/README.md @@ -0,0 +1,35 @@ +# couchdb + +This module monitors vital statistics of a local Apache CouchDB 2.x server, including: + +* Overall server reads/writes +* HTTP traffic breakdown + * Request methods (`GET`, `PUT`, `POST`, etc.) + * Response status codes (`200`, `201`, `4xx`, etc.) +* Active server tasks +* Replication status (CouchDB 2.1 and up only) +* Erlang VM stats +* Optional per-database statistics: sizes, # of docs, # of deleted docs + +### Configuration + +Sample for a local server running on port 5984: +```yaml +local: + user: 'admin' + pass: 'password' + node: 'couchdb@127.0.0.1' +``` + +Be sure to specify a correct admin-level username and password. + +You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. 
Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server. + +If you want per-database statistics, these need to be added to the configuration, separated by spaces: +```yaml +local: + ... + databases: 'db1 db2 db3 ...' +``` + +--- diff --git a/python.d/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py similarity index 100% rename from python.d/couchdb.chart.py rename to collectors/python.d.plugin/couchdb/couchdb.chart.py diff --git a/conf.d/python.d/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf similarity index 100% rename from conf.d/python.d/couchdb.conf rename to collectors/python.d.plugin/couchdb/couchdb.conf diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md new file mode 100644 index 0000000000..33891d59da --- /dev/null +++ b/collectors/python.d.plugin/cpufreq/README.md @@ -0,0 +1,30 @@ +# cpufreq + +This module shows the current CPU frequency as set by the cpufreq kernel +module. + +**Requirement:** +You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT` +enabled in your kernel. + +This module tries to read from one of two possible locations. On +initialization, it tries to read the `time_in_state` files provided by +cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it +falls back to using the more inaccurate `scaling_cur_freq` file (which only +represents the **current** CPU frequency, and doesn't account for any state +changes which happen between updates). + +It produces one chart with multiple lines (one line per core). + +### configuration + +Sample: + +```yaml +sys_dir: "/sys/devices" +``` + +If no configuration is given, module will search for cpufreq files in `/sys/devices` directory. +Directory is also prefixed with `NETDATA_HOST_PREFIX` if specified. 
+ +--- diff --git a/python.d/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py similarity index 100% rename from python.d/cpufreq.chart.py rename to collectors/python.d.plugin/cpufreq/cpufreq.chart.py diff --git a/conf.d/python.d/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf similarity index 100% rename from conf.d/python.d/cpufreq.conf rename to collectors/python.d.plugin/cpufreq/cpufreq.conf diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md new file mode 100644 index 0000000000..4951696383 --- /dev/null +++ b/collectors/python.d.plugin/cpuidle/README.md @@ -0,0 +1,11 @@ +# cpuidle + +This module monitors the usage of CPU idle states. + +**Requirement:** +Your kernel needs to have `CONFIG_CPU_IDLE` enabled. + +It produces one stacked chart per CPU, showing the percentage of time spent in +each state. + +--- diff --git a/python.d/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py similarity index 100% rename from python.d/cpuidle.chart.py rename to collectors/python.d.plugin/cpuidle/cpuidle.chart.py diff --git a/conf.d/python.d/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf similarity index 100% rename from conf.d/python.d/cpuidle.conf rename to collectors/python.d.plugin/cpuidle/cpuidle.conf diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md new file mode 100644 index 0000000000..3703e8aaf6 --- /dev/null +++ b/collectors/python.d.plugin/dns_query_time/README.md @@ -0,0 +1,10 @@ +# dns_query_time + +This module provides DNS query time statistics. + +**Requirement:** +* `python-dnspython` package + +It produces one aggregate chart or one chart per DNS server, showing the query time. 
+ +--- diff --git a/python.d/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py similarity index 100% rename from python.d/dns_query_time.chart.py rename to collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py diff --git a/conf.d/python.d/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf similarity index 100% rename from conf.d/python.d/dns_query_time.conf rename to collectors/python.d.plugin/dns_query_time/dns_query_time.conf diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md new file mode 100644 index 0000000000..b646ae27c9 --- /dev/null +++ b/collectors/python.d.plugin/dnsdist/README.md @@ -0,0 +1,54 @@ +# dnsdist + +Module monitor dnsdist performance and health metrics. + +Following charts are drawn: + +1. **Response latency** + * latency-slow + * latency100-1000 + * latency50-100 + * latency10-50 + * latency1-10 + * latency0-1 + +2. **Cache performance** + * cache-hits + * cache-misses + +3. **ACL events** + * acl-drops + * rule-drop + * rule-nxdomain + * rule-refused + +4. **Noncompliant data** + * empty-queries + * no-policy + * noncompliant-queries + * noncompliant-responses + +5. **Queries** + * queries + * rdqueries + * rdqueries + +6. 
**Health** + * downstream-send-errors + * downstream-timeouts + * servfail-responses + * trunc-failures + +### configuration + +```yaml +localhost: + name : 'local' + url : 'http://127.0.0.1:5053/jsonstat?command=stats' + user : 'username' + pass : 'password' + header: + X-API-Key: 'dnsdist-api-key' +``` + +--- diff --git a/python.d/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py similarity index 100% rename from python.d/dnsdist.chart.py rename to collectors/python.d.plugin/dnsdist/dnsdist.chart.py diff --git a/conf.d/python.d/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf similarity index 100% rename from conf.d/python.d/dnsdist.conf rename to collectors/python.d.plugin/dnsdist/dnsdist.conf diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md new file mode 100644 index 0000000000..d3f6038084 --- /dev/null +++ b/collectors/python.d.plugin/dockerd/README.md @@ -0,0 +1,26 @@ +# dockerd + +Module monitor docker health metrics. + +**Requirement:** +* `docker` package + +Following charts are drawn: + +1. **running containers** + * count + +2. **healthy containers** + * count + +3. 
**unhealthy containers** + * count + +### configuration + +```yaml + update_every : 1 + priority : 60000 + ``` + +--- diff --git a/python.d/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py similarity index 100% rename from python.d/dockerd.chart.py rename to collectors/python.d.plugin/dockerd/dockerd.chart.py diff --git a/conf.d/python.d/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf similarity index 100% rename from conf.d/python.d/dockerd.conf rename to collectors/python.d.plugin/dockerd/dockerd.conf diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md new file mode 100644 index 0000000000..50950ecc11 --- /dev/null +++ b/collectors/python.d.plugin/dovecot/README.md @@ -0,0 +1,73 @@ +# dovecot + +This module provides statistics information from Dovecot server. +Statistics are taken from dovecot socket by executing `EXPORT global` command. +More information about dovecot stats can be found on [project wiki page.](http://wiki2.dovecot.org/Statistics) + +**Requirement:** +Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket. + +Module gives information with following charts: + +1. **sessions** + * active sessions + +2. **logins** + * logins + +3. **commands** - number of IMAP commands + * commands + +4. **Faults** + * minor + * major + +5. **Context Switches** + * volountary + * involountary + +6. **disk** in bytes/s + * read + * write + +7. **bytes** in bytes/s + * read + * write + +8. **number of syscalls** in syscalls/s + * read + * write + +9. **lookups** - number of lookups per second + * path + * attr + +10. **hits** - number of cache hits + * hits + +11. **attempts** - authorization attempts + * success + * failure + +12. 
**cache** - cached authorization hits + * hit + * miss + +### configuration + +Sample: + +```yaml +localtcpip: + name : 'local' + host : '127.0.0.1' + port : 24242 + +localsocket: + name : 'local' + socket : '/var/run/dovecot/stats' +``` + +If no configuration is given, module will attempt to connect to dovecot using unix socket located at `/var/run/dovecot/stats` + +--- diff --git a/python.d/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py similarity index 100% rename from python.d/dovecot.chart.py rename to collectors/python.d.plugin/dovecot/dovecot.chart.py diff --git a/conf.d/python.d/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf similarity index 100% rename from conf.d/python.d/dovecot.conf rename to collectors/python.d.plugin/dovecot/dovecot.conf diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md new file mode 100644 index 0000000000..75e17015b6 --- /dev/null +++ b/collectors/python.d.plugin/elasticsearch/README.md @@ -0,0 +1,60 @@ +# elasticsearch + +This module monitors Elasticsearch performance and health metrics. + +It produces: + +1. **Search performance** charts: + * Number of queries, fetches + * Time spent on queries, fetches + * Query and fetch latency + +2. **Indexing performance** charts: + * Number of documents indexed, index refreshes, flushes + * Time spent on indexing, refreshing, flushing + * Indexing and flushing latency + +3. **Memory usage and garbage collection** charts: + * JVM heap currently in use, committed + * Count of garbage collections + * Time spent on garbage collections + +4. **Host metrics** charts: + * Available file descriptors in percent + * Opened HTTP connections + * Cluster communication transport metrics + +5. **Queues and rejections** charts: + * Number of queued/rejected threads in thread pool + +6. **Fielddata cache** charts: + * Fielddata cache size + * Fielddata evictions and circuit breaker tripped count + +7.
**Cluster health API** charts: + * Cluster status + * Nodes and tasks statistics + * Shards statistics + +8. **Cluster stats API** charts: + * Nodes statistics + * Query cache statistics + * Docs statistics + * Store statistics + * Indices and shards statistics + +### configuration + +Sample: + +```yaml +local: + host : 'ipaddress' # Server ip address or hostname + port : 'port' # Port on which elasticsearch listens + cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default. + cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default. +``` + +If no configuration is given, module will fail to run. + +--- diff --git a/python.d/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py similarity index 100% rename from python.d/elasticsearch.chart.py rename to collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py diff --git a/conf.d/python.d/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf similarity index 100% rename from conf.d/python.d/elasticsearch.conf rename to collectors/python.d.plugin/elasticsearch/elasticsearch.conf diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md new file mode 100644 index 0000000000..f9f314ac43 --- /dev/null +++ b/collectors/python.d.plugin/example/README.md @@ -0,0 +1 @@ +An example python data collection module.
\ No newline at end of file diff --git a/python.d/example.chart.py b/collectors/python.d.plugin/example/example.chart.py similarity index 100% rename from python.d/example.chart.py rename to collectors/python.d.plugin/example/example.chart.py diff --git a/conf.d/python.d/example.conf b/collectors/python.d.plugin/example/example.conf similarity index 100% rename from conf.d/python.d/example.conf rename to collectors/python.d.plugin/example/example.conf diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md new file mode 100644 index 0000000000..b9a62cad9d --- /dev/null +++ b/collectors/python.d.plugin/exim/README.md @@ -0,0 +1,13 @@ +# exim + +Simple module executing `exim -bpc` to grab exim queue. +This command can take a lot of time to finish its execution thus it is not recommended to run it every second. + +It produces only one chart: + +1. **Exim Queue Emails** + * emails + +Configuration is not needed. + +--- diff --git a/python.d/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py similarity index 100% rename from python.d/exim.chart.py rename to collectors/python.d.plugin/exim/exim.chart.py diff --git a/conf.d/python.d/exim.conf b/collectors/python.d.plugin/exim/exim.conf similarity index 100% rename from conf.d/python.d/exim.conf rename to collectors/python.d.plugin/exim/exim.conf diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md new file mode 100644 index 0000000000..2ab0219653 --- /dev/null +++ b/collectors/python.d.plugin/fail2ban/README.md @@ -0,0 +1,23 @@ +# fail2ban + +Module monitor fail2ban log file to show all bans for all active jails + +**Requirements:** + * fail2ban.log file MUST BE readable by netdata (A good idea is to add **create 0640 root netdata** to fail2ban conf at logrotate.d) + +It produces one chart with multiple lines (one line per jail) + +### configuration + +Sample: + +```yaml +local: + log_path: '/var/log/fail2ban.log' + 
conf_path: '/etc/fail2ban/jail.local' + exclude: 'dropbear apache' +``` +If no configuration is given, module will attempt to read log file at `/var/log/fail2ban.log` and conf file at `/etc/fail2ban/jail.local`. +If conf file is not found default jail is `ssh`. + +--- diff --git a/python.d/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py similarity index 100% rename from python.d/fail2ban.chart.py rename to collectors/python.d.plugin/fail2ban/fail2ban.chart.py diff --git a/conf.d/python.d/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf similarity index 100% rename from conf.d/python.d/fail2ban.conf rename to collectors/python.d.plugin/fail2ban/fail2ban.conf diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md new file mode 100644 index 0000000000..e5fe88ec39 --- /dev/null +++ b/collectors/python.d.plugin/freeradius/README.md @@ -0,0 +1,70 @@ +# freeradius + +Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second. + +It produces: + +1. **Authentication counters:** + * access-accepts + * access-rejects + * auth-dropped-requests + * auth-duplicate-requests + * auth-invalid-requests + * auth-malformed-requests + * auth-unknown-types + +2. **Accounting counters:** [optional] + * accounting-requests + * accounting-responses + * acct-dropped-requests + * acct-duplicate-requests + * acct-invalid-requests + * acct-malformed-requests + * acct-unknown-types + +3. **Proxy authentication counters:** [optional] + * proxy-access-accepts + * proxy-access-rejects + * proxy-auth-dropped-requests + * proxy-auth-duplicate-requests + * proxy-auth-invalid-requests + * proxy-auth-malformed-requests + * proxy-auth-unknown-types + +4. 
**Proxy accounting counters:** [optional] + * proxy-accounting-requests + * proxy-accounting-responses + * proxy-acct-dropped-requests + * proxy-acct-duplicate-requests + * proxy-acct-invalid-requests + * proxy-acct-malformed-requests + * proxy-acct-unknown-types + + +### configuration + +Sample: + +```yaml +local: + host : 'localhost' + port : '18121' + secret : 'adminsecret' + acct : False # Freeradius accounting statistics. + proxy_auth : False # Freeradius proxy authentication statistics. + proxy_acct : False # Freeradius proxy accounting statistics. +``` + +**Freeradius server configuration:** + +The configuration for the status server is automatically created in the sites-available directory. +By default, server is enabled and can be queried from every client. +FreeRADIUS will only respond to status-server messages, if the status-server virtual server has been enabled. + +To do this, create a link from the sites-enabled directory to the status file in the sites-available directory: + * cd sites-enabled + * ln -s ../sites-available/status status + +and restart/reload your FreeRADIUS server. + +--- diff --git a/python.d/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py similarity index 100% rename from python.d/freeradius.chart.py rename to collectors/python.d.plugin/freeradius/freeradius.chart.py diff --git a/conf.d/python.d/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf similarity index 100% rename from conf.d/python.d/freeradius.conf rename to collectors/python.d.plugin/freeradius/freeradius.conf diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md new file mode 100644 index 0000000000..ec5dbdc400 --- /dev/null +++ b/collectors/python.d.plugin/go_expvar/README.md @@ -0,0 +1,244 @@ +# go_expvar + +The `go_expvar` module can monitor any Go application that exposes its metrics with the use of `expvar` package from the Go standard library.
+ +`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. +Please see the [wiki page](https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications) for more info. + +For the memory statistics, it produces the following charts: + +1. **Heap allocations** in kB + * alloc: size of objects allocated on the heap + * inuse: size of allocated heap spans + +2. **Stack allocations** in kB + * inuse: size of allocated stack spans + +3. **MSpan allocations** in kB + * inuse: size of allocated mspan structures + +4. **MCache allocations** in kB + * inuse: size of allocated mcache structures + +5. **Virtual memory** in kB + * sys: size of reserved virtual address space + +6. **Live objects** + * live: number of live objects in memory + +7. **GC pauses average** in ns + * avg: average duration of all GC stop-the-world pauses + + +## Monitoring Go Applications + +Netdata can be used to monitor running Go applications that expose their metrics with the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library. + +The `expvar` package exposes these metrics over HTTP and is very easy to use. Consider this minimal sample below: + +``` +package main + +import ( + _ "expvar" + "net/http" +) + +func main() { + http.ListenAndServe("127.0.0.1:8080", nil) +} +``` + +When imported this way, the `expvar` package registers a HTTP handler at `/debug/vars` that exposes Go runtime's memory statistics in JSON format. You can inspect the output by opening the URL in your browser (or by using `wget` or `curl`). Sample output: + +``` +{ +"cmdline": ["./expvar-demo-binary"], +"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>} +} +``` + +You can of course expose and monitor your own variables as well. 
Here is a sample Go application that exposes a few custom variables: + +``` +package main + +import ( + "expvar" + "net/http" + "runtime" + "time" +) + +func main() { + + tick := time.NewTicker(1 * time.Second) + num_go := expvar.NewInt("runtime.goroutines") + counters := expvar.NewMap("counters") + counters.Set("cnt1", new(expvar.Int)) + counters.Set("cnt2", new(expvar.Float)) + + go http.ListenAndServe(":8080", nil) + + for { + select { + case <- tick.C: + num_go.Set(int64(runtime.NumGoroutine())) + counters.Add("cnt1", 1) + counters.AddFloat("cnt2", 1.452) + } + } +} +``` + +Apart from the runtime memory stats, this application publishes two counters and the number of currently running Goroutines and updates these stats every second. + +In the next section, we will cover how to monitor and chart these exposed stats with the use of `netdata`s ```go_expvar``` module. + +### Using netdata go_expvar module + +The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d.conf) (to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar` variable to `yes`: + +``` +# Enable / Disable python.d.plugin modules +#default_run: yes +# +# If "default_run" = "yes" the default for all modules is enabled (yes). +# Setting any of these to "no" will disable it. +# +# If "default_run" = "no" the default for all modules is disabled (no). +# Setting any of these to "yes" will enable it. +... +go_expvar: yes +... +``` + +Next, we need to edit the module configuration file (found at [`/etc/netdata/python.d/go_expvar.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/go_expvar.conf) by default) (to edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`). The module configuration consists of jobs, where each job can be used to monitor a separate Go application. 
Let's see a sample job configuration: + +``` +# /etc/netdata/python.d/go_expvar.conf + +app1: + name : 'app1' + url : 'http://127.0.0.1:8080/debug/vars' + collect_memstats: true + extra_charts: {} +``` + +Let's go over each of the defined options: + + name: 'app1' + +This is the job name that will appear at the netdata dashboard. If not defined, the job_name (top level key) will be used. + + url: 'http://127.0.0.1:8080/debug/vars' + +This is the URL of the expvar endpoint. As the expvar handler can be installed in a custom path, the whole URL has to be specified. This value is mandatory. + + collect_memstats: true + +Whether to enable collecting stats about Go runtime's memory. You can find more information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats). + + extra_charts: {} + +Enables the user to specify custom expvars to monitor and chart. Will be explained in more detail below. + +**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will disable itself, as there will be no data to collect!** + +Apart from these options, each job supports options inherited from netdata's `python.d.plugin` and its base `UrlService` class. These are: + + update_every: 1 # the job's data collection frequency + priority: 60000 # the job's order on the dashboard + retries: 60 # the job's number of restoration attempts + user: admin # use when the expvar endpoint is protected by HTTP Basic Auth + password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth + +### Monitoring custom vars with go_expvar + +Now, memory stats might be useful, but what if you want netdata to monitor some custom values that your Go application exposes? The `go_expvar` module can do that as well with the use of the `extra_charts` configuration variable. + +The `extra_charts` variable is a YaML list of netdata chart definitions. 
Each chart definition has the following keys: + + id: netdata chart ID + options: a key-value mapping of chart options + lines: a list of line definitions + +**Note: please do not use dots in the chart or line ID field. See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.** + +Please see these two links to the official netdata documentation for more information about the values: + +- [External plugins - charts](https://github.com/netdata/netdata/wiki/External-Plugins#chart) +- [Chart variables](https://github.com/netdata/netdata/wiki/How-to-write-new-module#global-variables-order-and-chart) + +**Line definitions** + +Each chart can define multiple lines (dimensions). A line definition is a key-value mapping of line options. Each line can have the following options: + + # mandatory + expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint + expvar_type: value type; supported are "float" or "int" + id: the id of this line/dimension in netdata + + # optional - netdata defaults are used if these options are not defined + name: '' + algorithm: absolute + multiplier: 1 + divisor: 100 if expvar_type == float, 1 if expvar_type == int + hidden: False + +Please see the following link for more information about the options and their default values: +[External plugins - dimensions](https://github.com/netdata/netdata/wiki/External-Plugins#dimension) + +Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map; All dicts in the resulting JSON document are then flattened to one level. Expvar names are joined together with '.' when flattening. + +Example: +``` +{ + "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983}, + "runtime.goroutines": 5 +} +``` + +In the above case, the exported variables will be available under `runtime.goroutines`, `counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision, the first defined key wins and all subsequent keys with the same name are ignored. + +**Configuration example** + +The configuration below matches the second Go application described above. Netdata will monitor and chart memory stats for the application, as well as a custom chart of running goroutines and two dummy counters. + +``` +app1: + name : 'app1' + url : 'http://127.0.0.1:8080/debug/vars' + collect_memstats: true + extra_charts: + - id: "runtime_goroutines" + options: + name: num_goroutines + title: "runtime: number of goroutines" + units: goroutines + family: runtime + context: expvar.runtime.goroutines + chart_type: line + lines: + - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines} + - id: "foo_counters" + options: + name: counters + title: "some random counters" + units: awesomeness + family: counters + context: expvar.foo.counters + chart_type: line + lines: + - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1} + - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2} +``` + +**Netdata charts example** + +The images below show how do the final charts in netdata look. 
+ + + + + diff --git a/python.d/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py similarity index 100% rename from python.d/go_expvar.chart.py rename to collectors/python.d.plugin/go_expvar/go_expvar.chart.py diff --git a/conf.d/python.d/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf similarity index 100% rename from conf.d/python.d/go_expvar.conf rename to collectors/python.d.plugin/go_expvar/go_expvar.conf diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md new file mode 100644 index 0000000000..4bff256709 --- /dev/null +++ b/collectors/python.d.plugin/haproxy/README.md @@ -0,0 +1,49 @@ +# haproxy + +Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current. +And health metrics such as backend servers status (server check should be used). + +Plugin can obtain data from url **OR** unix socket. + +**Requirement:** +Socket MUST be readable AND writable by netdata user. + +It produces: + +1. **Frontend** family charts + * Kilobytes in/s + * Kilobytes out/s + * Sessions current + * Sessions in queue current + +2. **Backend** family charts + * Kilobytes in/s + * Kilobytes out/s + * Sessions current + * Sessions in queue current + +3. **Health** chart + * number of failed servers for every backend (in DOWN state) + + +### configuration + +Sample: + +```yaml +via_url: + user : 'username' # ONLY IF stats auth is used + pass : 'password' # # ONLY IF stats auth is used + url : 'http://ip.address:port/url;csv;norefresh' +``` + +OR + +```yaml +via_socket: + socket : 'path/to/haproxy/sock' +``` + +If no configuration is given, module will fail to run. 
+ +--- diff --git a/python.d/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py similarity index 100% rename from python.d/haproxy.chart.py rename to collectors/python.d.plugin/haproxy/haproxy.chart.py diff --git a/conf.d/python.d/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf similarity index 100% rename from conf.d/python.d/haproxy.conf rename to collectors/python.d.plugin/haproxy/haproxy.conf diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md new file mode 100644 index 0000000000..1236186a52 --- /dev/null +++ b/collectors/python.d.plugin/hddtemp/README.md @@ -0,0 +1,22 @@ +# hddtemp + +Module monitors disk temperatures from one or more hddtemp daemons. + +**Requirement:** +Running `hddtemp` in daemonized mode with access on tcp port + +It produces one chart **Temperature** with dynamic number of dimensions (one per disk) + +### configuration + +Sample: + +```yaml +update_every: 3 +host: "127.0.0.1" +port: 7634 +``` + +If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address + +--- diff --git a/python.d/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py similarity index 100% rename from python.d/hddtemp.chart.py rename to collectors/python.d.plugin/hddtemp/hddtemp.chart.py diff --git a/conf.d/python.d/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf similarity index 100% rename from conf.d/python.d/hddtemp.conf rename to collectors/python.d.plugin/hddtemp/hddtemp.conf diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md new file mode 100644 index 0000000000..759107663d --- /dev/null +++ b/collectors/python.d.plugin/httpcheck/README.md @@ -0,0 +1,41 @@ +# httpcheck + +Module monitors remote http server for availability and response time. + +Following charts are drawn per job: + +1. 
**Response time** ms + * Time in 0.1 ms resolution in which the server responds. + If the connection failed, the value is missing. + +2. **Status** boolean + * Connection successful + * Unexpected content: No Regex match found in the response + * Unexpected status code: Do we get 500 errors? + * Connection failed: port not listening or blocked + * Connection timed out: host or port unreachable + +### configuration + +Sample configuration and their default values. + +```yaml +server: + url: 'http://host:port/path' # required + status_accepted: # optional + - 200 + timeout: 1 # optional, supports decimals (e.g. 0.2) + update_every: 3 # optional + regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html + redirect: yes # optional +``` + +### notes + + * The status chart is primarily intended for alarms, badges or for access via API. + * A system/service/firewall might block netdata's access if a portscan or + similar is detected. + * This plugin is meant for simple use cases. Currently, the accuracy of the + response time is low and should be used as reference only. + +--- diff --git a/python.d/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py similarity index 100% rename from python.d/httpcheck.chart.py rename to collectors/python.d.plugin/httpcheck/httpcheck.chart.py diff --git a/conf.d/python.d/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf similarity index 100% rename from conf.d/python.d/httpcheck.conf rename to collectors/python.d.plugin/httpcheck/httpcheck.conf diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md new file mode 100644 index 0000000000..a28a6c3981 --- /dev/null +++ b/collectors/python.d.plugin/icecast/README.md @@ -0,0 +1,26 @@ +# icecast + +This module will monitor number of listeners for active sources. + +**Requirements:** + * icecast version >= 2.4.0 + +It produces the following charts: + +1. 
**Listeners** in listeners + * source number + +### configuration + +Needs only `url` to server's `/status-json.xsl` + +Here is an example for remote server: + +```yaml +remote: + url : 'http://1.2.3.4:8443/status-json.xsl' +``` + +Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl` + +--- diff --git a/python.d/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py similarity index 100% rename from python.d/icecast.chart.py rename to collectors/python.d.plugin/icecast/icecast.chart.py diff --git a/conf.d/python.d/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf similarity index 100% rename from conf.d/python.d/icecast.conf rename to collectors/python.d.plugin/icecast/icecast.conf diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md new file mode 100644 index 0000000000..a30649a5f1 --- /dev/null +++ b/collectors/python.d.plugin/ipfs/README.md @@ -0,0 +1,25 @@ +# ipfs + +Module monitors [IPFS](https://ipfs.io) basic information. + +1. **Bandwidth** in kbits/s + * in + * out + +2. **Peers** + * peers + +### configuration + +Only url to IPFS server is needed. 
+ +Sample: + +```yaml +localhost: + name : 'local' + url : 'http://localhost:5001' +``` + +--- + diff --git a/python.d/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py similarity index 100% rename from python.d/ipfs.chart.py rename to collectors/python.d.plugin/ipfs/ipfs.chart.py diff --git a/conf.d/python.d/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf similarity index 100% rename from conf.d/python.d/ipfs.conf rename to collectors/python.d.plugin/ipfs/ipfs.conf diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md new file mode 100644 index 0000000000..334d86e337 --- /dev/null +++ b/collectors/python.d.plugin/isc_dhcpd/README.md @@ -0,0 +1,34 @@ +# isc_dhcpd + +Module monitors leases database to show all active leases for given pools. + +**Requirements:** + * dhcpd leases file MUST BE readable by netdata + * pools MUST BE in CIDR format + +It produces: + +1. **Pools utilization** Aggregate chart for all pools. + * utilization in percent + +2. **Total leases** + * leases (overall number of leases for all pools) + +3. **Active leases** for every pool + * leases (number of active leases in pool) + + +### configuration + +Sample: + +```yaml +local: + leases_path : '/var/lib/dhcp/dhcpd.leases' + pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24' +``` + +In case of python2 you need to install `py2-ipaddress` to make plugin work. +The module will not work if no configuration is given.
+ +--- diff --git a/python.d/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py similarity index 100% rename from python.d/isc_dhcpd.chart.py rename to collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py diff --git a/conf.d/python.d/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf similarity index 100% rename from conf.d/python.d/isc_dhcpd.conf rename to collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md new file mode 100644 index 0000000000..5cfbe41ce5 --- /dev/null +++ b/collectors/python.d.plugin/linux_power_supply/README.md @@ -0,0 +1,67 @@ +# linux\_power\_supply + +This module monitors various metrics reported by power supply drivers +on Linux. This allows tracking and alerting on things like remaining +battery capacity. + +Depending on the underlying driver, it may provide the following charts +and metrics: + +1. Capacity: The power supply capacity expressed as a percentage. + * capacity\_now + +2. Charge: The charge for the power supply, expressed as microamphours. + * charge\_full\_design + * charge\_full + * charge\_now + * charge\_empty + * charge\_empty\_design + +3. Energy: The energy for the power supply, expressed as microwatthours. + * energy\_full\_design + * energy\_full + * energy\_now + * energy\_empty + * energy\_empty\_design + +4. Voltage: The voltage for the power supply, expressed as microvolts. + * voltage\_max\_design + * voltage\_max + * voltage\_now + * voltage\_min + * voltage\_min\_design + +### configuration + +Sample: + +```yaml +battery: + supply: 'BAT0' + charts: 'capacity charge energy voltage' +``` + +The `supply` key specifies the name of the power supply device to monitor. +You can use `ls /sys/class/power_supply` to get a list of such devices +on your system. + +The `charts` key is a space separated list of which charts to try +to display.
It defaults to trying to display everything.
+
+### notes
+
+* Most drivers provide at least the first chart. Battery powered ACPI
+compliant systems (like most laptops) provide all but the third, but do
+not provide all of the metrics for each chart.
+
+* Current, energy, and voltages are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect to see changes in these
+charts jump instead of scaling smoothly.
+
+* If `max` or `full` attribute is defined by the driver, but not a
+corresponding `min` or `empty` attribute, then netdata will still provide
+the corresponding `min` or `empty`, which will then always read as zero.
+This way, alerts which match on these will still work.
+
+--- diff --git a/python.d/linux_power_supply.chart.py b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py similarity index 100% rename from python.d/linux_power_supply.chart.py rename to collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py diff --git a/conf.d/python.d/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf similarity index 100% rename from conf.d/python.d/linux_power_supply.conf rename to collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md new file mode 100644 index 0000000000..d1482f33c7 --- /dev/null +++ b/collectors/python.d.plugin/litespeed/README.md @@ -0,0 +1,47 @@ +# litespeed +
+Module monitors litespeed web server performance metrics.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+ * in
+ * out
+
+2. **Network Throughput HTTPS** in kilobits/s
+ * in
+ * out
+
+3. **Connections HTTP** in connections
+ * free
+ * used
+
+4. **Connections HTTPS** in connections
+ * free
+ * used
+
+5. **Requests** in requests/s
+ * requests
+
+6. 
**Requests In Processing** in requests + * processing + +7. **Public Cache Hits** in hits/s + * hits + +8. **Private Cache Hits** in hits/s + * hits + +9. **Static Hits** in hits/s + * hits + + +### configuration +```yaml +local: + path : 'PATH' +``` + +If no configuration is given, module will use "/tmp/lshttpd/". + +--- diff --git a/python.d/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py similarity index 100% rename from python.d/litespeed.chart.py rename to collectors/python.d.plugin/litespeed/litespeed.chart.py diff --git a/conf.d/python.d/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf similarity index 100% rename from conf.d/python.d/litespeed.conf rename to collectors/python.d.plugin/litespeed/litespeed.conf diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md new file mode 100644 index 0000000000..8f8670d4a6 --- /dev/null +++ b/collectors/python.d.plugin/logind/README.md @@ -0,0 +1,54 @@ +# logind + +This module monitors active sessions, users, and seats tracked by systemd-logind or elogind. + +It provides the following charts: + +1. **Sessions** Tracks the total number of sessions. + * Graphical: Local graphical sessions (running X11, or Wayland, or something else). + * Console: Local console sessions. + * Remote: Remote sessions. + +2. **Users** Tracks total number of unique user logins of each type. + * Graphical + * Console + * Remote + +3. **Seats** Total number of seats in use. + * Seats + +### configuration + +This module needs no configuration. Just make sure the netdata user +can run the `loginctl` command and get a session list without having to +specify a path. + +This will work with any command that can output data in the _exact_ +same format as `loginctl list-sessions --no-legend`. 
If you have some +other command you want to use that outputs data in this format, you can +specify it using the `command` key like so: + +```yaml +command: '/path/to/other/command' +``` + +### notes + +* This module's ability to track logins is dependent on what PAM services +are configured to register sessions with logind. In particular, for +most systems, it will only track TTY logins, local desktop logins, +and logins through remote shell connections. + +* The users chart counts _usernames_ not UID's. This is potentially +important in configurations where multiple users have the same UID. + +* The users chart counts any given user name up to once for _each_ type +of login. So if the same user has a graphical and a console login on a +system, they will show up once in the graphical count, and once in the +console count. + +* Because the data collection process is rather expensive, this plugin +is currently disabled by default, and needs to be explicitly enabled in +`/etc/netdata/python.d.conf` before it will run. + +--- diff --git a/python.d/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py similarity index 100% rename from python.d/logind.chart.py rename to collectors/python.d.plugin/logind/logind.chart.py diff --git a/conf.d/python.d/logind.conf b/collectors/python.d.plugin/logind/logind.conf similarity index 100% rename from conf.d/python.d/logind.conf rename to collectors/python.d.plugin/logind/logind.conf diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md new file mode 100644 index 0000000000..1ff8f7dabc --- /dev/null +++ b/collectors/python.d.plugin/mdstat/README.md @@ -0,0 +1,26 @@ +# mdstat + +Module monitor /proc/mdstat + +It produces: + +1. **Health** Number of failed disks in every array (aggregate chart). + +2. **Disks stats** + * total (number of devices array ideally would have) + * inuse (number of devices currently are in use) + +3. 
**Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+### configuration
+No configuration is needed.
+
+--- diff --git a/python.d/mdstat.chart.py b/collectors/python.d.plugin/mdstat/mdstat.chart.py similarity index 100% rename from python.d/mdstat.chart.py rename to collectors/python.d.plugin/mdstat/mdstat.chart.py diff --git a/conf.d/python.d/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf similarity index 100% rename from conf.d/python.d/mdstat.conf rename to collectors/python.d.plugin/mdstat/mdstat.conf diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md new file mode 100644 index 0000000000..647a056b8d --- /dev/null +++ b/collectors/python.d.plugin/megacli/README.md @@ -0,0 +1,28 @@ +# megacli +
+Module collects adapter, physical drives and battery stats.
+
+**Requirements:**
+ * `netdata` user needs to be able to sudo the `megacli` program without password
+
+To grab stats it executes:
+ * `sudo -n megacli -LDPDInfo -aAll`
+ * `sudo -n megacli -AdpBbuCmd -a0`
+
+
+It produces:
+
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
+
+3. **Physical Drives Predictive Failures**
+
+4. **Battery Relative State of Charge**
+
+5. **Battery Cycle Count**
+
+### configuration
+Battery stats are disabled by default in the module configuration file. 
+ +--- diff --git a/python.d/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py similarity index 100% rename from python.d/megacli.chart.py rename to collectors/python.d.plugin/megacli/megacli.chart.py diff --git a/conf.d/python.d/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf similarity index 100% rename from conf.d/python.d/megacli.conf rename to collectors/python.d.plugin/megacli/megacli.conf diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md new file mode 100644 index 0000000000..3521c109dc --- /dev/null +++ b/collectors/python.d.plugin/memcached/README.md @@ -0,0 +1,69 @@ +# memcached + +Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats). + +1. **Network** in kilobytes/s + * read + * written + +2. **Connections** per second + * current + * rejected + * total + +3. **Items** in cluster + * current + * total + +4. **Evicted and Reclaimed** items + * evicted + * reclaimed + +5. **GET** requests/s + * hits + * misses + +6. **GET rate** rate in requests/s + * rate + +7. **SET rate** rate in requests/s + * rate + +8. **DELETE** requests/s + * hits + * misses + +9. **CAS** requests/s + * hits + * misses + * bad value + +10. **Increment** requests/s + * hits + * misses + +11. **Decrement** requests/s + * hits + * misses + +12. **Touch** requests/s + * hits + * misses + +13. **Touch rate** rate in requests/s + * rate + +### configuration + +Sample: + +```yaml +localtcpip: + name : 'local' + host : '127.0.0.1' + port : 24242 +``` + +If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address. 
+ +--- diff --git a/python.d/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py similarity index 100% rename from python.d/memcached.chart.py rename to collectors/python.d.plugin/memcached/memcached.chart.py diff --git a/conf.d/python.d/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf similarity index 100% rename from conf.d/python.d/memcached.conf rename to collectors/python.d.plugin/memcached/memcached.conf diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md new file mode 100644 index 0000000000..e9252d43c0 --- /dev/null +++ b/collectors/python.d.plugin/mongodb/README.md @@ -0,0 +1,141 @@ +# mongodb + +Module monitor mongodb performance and health metrics + +**Requirements:** + * `python-pymongo` package. + +You need to install it manually. + + +Number of charts depends on mongodb version, storage engine and other features (replication): + +1. **Read requests**: + * query + * getmore (operation the cursor executes to get additional data from query) + +2. **Write requests**: + * insert + * delete + * update + +3. **Active clients**: + * readers (number of clients with read operations in progress or queued) + * writers (number of clients with write operations in progress or queued) + +4. **Journal transactions**: + * commits (count of transactions that have been written to the journal) + +5. **Data written to the journal**: + * volume (volume of data) + +6. **Background flush** (MMAPv1): + * average ms (average time taken by flushes to execute) + * last ms (time taken by the last flush) + +8. **Read tickets** (WiredTiger): + * in use (number of read tickets in use) + * available (number of available read tickets remaining) + +9. **Write tickets** (WiredTiger): + * in use (number of write tickets in use) + * available (number of available write tickets remaining) + +10. 
**Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+ * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+11. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+12. **Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+13. **Page faults**:
+ * page faults (number of times MongoDB had to request from disk)
+
+14. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+ * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+15. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+16. **Queued requests**:
+ * readers (number of read requests currently queued)
+ * writers (number of write requests currently queued)
+
+17. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+18. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+19. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+20. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+21. **Commands total/failed rate**
+ * count
+ * createIndex
+ * delete
+ * eval
+ * findAndModify
+ * insert
+
+22. 
**Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode) + * Global lock + * Database lock + * Collection lock + * Metadata lock + * oplog lock + +23. **Replica set members state** + * state + +24. **Oplog window** + * window (interval of time between the oldest and the latest entries in the oplog) + +25. **Replication lag** + * member (time when last entry from the oplog was applied for every member) + +26. **Replication set member heartbeat latency** + * member (time when last heartbeat was received from replica set member) + + +### configuration + +Sample: + +```yaml +local: + name : 'local' + host : '127.0.0.1' + port : 27017 + user : 'netdata' + pass : 'netdata' + +``` + +If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address + +--- diff --git a/python.d/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py similarity index 100% rename from python.d/mongodb.chart.py rename to collectors/python.d.plugin/mongodb/mongodb.chart.py diff --git a/conf.d/python.d/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf similarity index 100% rename from conf.d/python.d/mongodb.conf rename to collectors/python.d.plugin/mongodb/mongodb.conf diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md new file mode 100644 index 0000000000..6d10240c98 --- /dev/null +++ b/collectors/python.d.plugin/monit/README.md @@ -0,0 +1,33 @@ +# monit + +Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks). + +1. **Filesystems** + * Filesystems + * Directories + * Files + * Pipes + +2. **Applications** + * Processes (+threads/childs) + * Programs + +3. 
**Network** + * Hosts (+latency) + * Network interfaces + +### configuration + +Sample: + +```yaml +local: + name : 'local' + url : 'http://localhost:2812' + user: : admin + pass: : monit +``` + +If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`. + +--- diff --git a/python.d/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py similarity index 100% rename from python.d/monit.chart.py rename to collectors/python.d.plugin/monit/monit.chart.py diff --git a/conf.d/python.d/monit.conf b/collectors/python.d.plugin/monit/monit.conf similarity index 100% rename from conf.d/python.d/monit.conf rename to collectors/python.d.plugin/monit/monit.conf diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md new file mode 100644 index 0000000000..e38098e7e5 --- /dev/null +++ b/collectors/python.d.plugin/mysql/README.md @@ -0,0 +1,90 @@ +# mysql + +Module monitors one or more mysql servers + +**Requirements:** + * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower) + +It will produce following charts (if data is available): + +1. **Bandwidth** in kbps + * in + * out + +2. **Queries** in queries/sec + * queries + * questions + * slow queries + +3. **Operations** in operations/sec + * opened tables + * flush + * commit + * delete + * prepare + * read first + * read key + * read next + * read prev + * read random + * read random next + * rollback + * save point + * update + * write + +4. **Table Locks** in locks/sec + * immediate + * waited + +5. **Select Issues** in issues/sec + * full join + * full range join + * range + * range check + * scan + +6. **Sort Issues** in issues/sec + * merge passes + * range + * scan + +### configuration + +You can provide, per server, the following: + +1. username which have access to database (defaults to 'root') +2. password (defaults to none) +3. 
mysql my.cnf configuration file +4. mysql socket (optional) +5. mysql host (ip or hostname) +6. mysql port (defaults to 3306) + +Here is an example for 3 servers: + +```yaml +update_every : 10 +priority : 90100 +retries : 5 + +local: + 'my.cnf' : '/etc/mysql/my.cnf' + priority : 90000 + +local_2: + user : 'root' + pass : 'blablablabla' + socket : '/var/run/mysqld/mysqld.sock' + update_every : 1 + +remote: + user : 'admin' + pass : 'bla' + host : 'example.org' + port : 9000 + retries : 20 +``` + +If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root` + +--- diff --git a/python.d/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py similarity index 100% rename from python.d/mysql.chart.py rename to collectors/python.d.plugin/mysql/mysql.chart.py diff --git a/conf.d/python.d/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf similarity index 100% rename from conf.d/python.d/mysql.conf rename to collectors/python.d.plugin/mysql/mysql.conf diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md new file mode 100644 index 0000000000..007f45c7cc --- /dev/null +++ b/collectors/python.d.plugin/nginx/README.md @@ -0,0 +1,45 @@ +# nginx + +This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote. + +**Requirements:** + * nginx with configured 'ngx_http_stub_status_module' + * 'location /stub_status' + +Example nginx configuration can be found in 'python.d/nginx.conf' + +It produces following charts: + +1. **Active Connections** + * active + +2. **Requests** in requests/s + * requests + +3. **Active Connections by Status** + * reading + * writing + * waiting + +4. 
**Connections Rate** in connections/s + * accepts + * handled + +### configuration + +Needs only `url` to server's `stub_status` + +Here is an example for local server: + +```yaml +update_every : 10 +priority : 90100 + +local: + url : 'http://localhost/stub_status' + retries : 10 +``` + +Without configuration, module attempts to connect to `http://localhost/stub_status` + +--- diff --git a/python.d/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py similarity index 100% rename from python.d/nginx.chart.py rename to collectors/python.d.plugin/nginx/nginx.chart.py diff --git a/conf.d/python.d/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf similarity index 100% rename from conf.d/python.d/nginx.conf rename to collectors/python.d.plugin/nginx/nginx.conf diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md new file mode 100644 index 0000000000..43ec867a32 --- /dev/null +++ b/collectors/python.d.plugin/nginx_plus/README.md @@ -0,0 +1,125 @@ +# nginx_plus + +This module will monitor one or more nginx_plus servers depending on configuration. +Servers can be either local or remote. + +Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf' + +It produces following charts: + +1. **Requests total** in requests/s + * total + +2. **Requests current** in requests + * current + +3. **Connection Statistics** in connections/s + * accepted + * dropped + +4. **Workers Statistics** in workers + * idle + * active + +5. **SSL Handshakes** in handshakes/s + * successful + * failed + +6. **SSL Session Reuses** in sessions/s + * reused + +7. **SSL Memory Usage** in percent + * usage + +8. **Processes** in processes + * respawned + +For every server zone: + +1. **Processing** in requests + * processing + +2. **Requests** in requests/s + * requests + +3. **Responses** in requests/s + * 1xx + * 2xx + * 3xx + * 4xx + * 5xx + +4. 
**Traffic** in kilobits/s + * received + * sent + +For every upstream: + +1. **Peers Requests** in requests/s + * peer name (dimension per peer) + +2. **All Peers Responses** in responses/s + * 1xx + * 2xx + * 3xx + * 4xx + * 5xx + +3. **Peer Responses** in requests/s (for every peer) + * 1xx + * 2xx + * 3xx + * 4xx + * 5xx + +4. **Peers Connections** in active + * peer name (dimension per peer) + +5. **Peers Connections Usage** in percent + * peer name (dimension per peer) + +6. **All Peers Traffic** in KB + * received + * sent + +7. **Peer Traffic** in KB/s (for every peer) + * received + * sent + +8. **Peer Timings** in ms (for every peer) + * header + * response + +9. **Memory Usage** in percent + * usage + +10. **Peers Status** in state + * peer name (dimension per peer) + +11. **Peers Total Downtime** in seconds + * peer name (dimension per peer) + +For every cache: + +1. **Traffic** in KB + * served + * written + * bypass + +2. **Memory Usage** in percent + * usage + +### configuration + +Needs only `url` to server's `status` + +Here is an example for local server: + +```yaml +local: + url : 'http://localhost/status' +``` + +Without configuration, module fail to start. + +--- diff --git a/python.d/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py similarity index 100% rename from python.d/nginx_plus.chart.py rename to collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py diff --git a/conf.d/python.d/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf similarity index 100% rename from conf.d/python.d/nginx_plus.conf rename to collectors/python.d.plugin/nginx_plus/nginx_plus.conf diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md new file mode 100644 index 0000000000..02c302f415 --- /dev/null +++ b/collectors/python.d.plugin/nsd/README.md @@ -0,0 +1,54 @@ +# nsd + +Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics. 
+ +**Requirements:** + * Version of `nsd` must be 4.0+ + * Netdata must have permissions to run `nsd-control stats_noreset` + +It produces: + +1. **Queries** + * queries + +2. **Zones** + * master + * slave + +3. **Protocol** + * udp + * udp6 + * tcp + * tcp6 + +4. **Query Type** + * A + * NS + * CNAME + * SOA + * PTR + * HINFO + * MX + * NAPTR + * TXT + * AAAA + * SRV + * ANY + +5. **Transfer** + * NOTIFY + * AXFR + +6. **Return Code** + * NOERROR + * FORMERR + * SERVFAIL + * NXDOMAIN + * NOTIMP + * REFUSED + * YXDOMAIN + + +Configuration is not needed. + +--- diff --git a/python.d/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py similarity index 100% rename from python.d/nsd.chart.py rename to collectors/python.d.plugin/nsd/nsd.chart.py diff --git a/conf.d/python.d/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf similarity index 100% rename from conf.d/python.d/nsd.conf rename to collectors/python.d.plugin/nsd/nsd.conf diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md new file mode 100644 index 0000000000..b0fa17fde8 --- /dev/null +++ b/collectors/python.d.plugin/ntpd/README.md @@ -0,0 +1,71 @@ +# ntpd + +Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html). + +**Requirements:** + * Version: `NTPv4` + * Local interrogation allowed in `/etc/ntp.conf` (default): + +``` +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 +``` + +It produces: + +1. system + * offset + * jitter + * frequency + * delay + * dispersion + * stratum + * tc + * precision + +2. 
peers + * offset + * delay + * dispersion + * jitter + * rootdelay + * rootdispersion + * stratum + * hmode + * pmode + * hpoll + * ppoll + * precision + +**configuration** + +Sample: + +```yaml +update_every: 10 + +host: 'localhost' +port: '123' +show_peers: yes +# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16 +peer_filter: '(127\..*)|(192\.168\..*)' +# check for new/changed peers every 60 updates +peer_rescan: 60 +``` + +Sample (multiple jobs): + +Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`. + +```yaml +local: + host: 'localhost' + +otherhost: + host: 'otherhost' +``` + +If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the systemvars. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default, use `peer_filter: ''` to show all peers. + +--- diff --git a/python.d/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py similarity index 100% rename from python.d/ntpd.chart.py rename to collectors/python.d.plugin/ntpd/ntpd.chart.py diff --git a/conf.d/python.d/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf similarity index 100% rename from conf.d/python.d/ntpd.conf rename to collectors/python.d.plugin/ntpd/ntpd.conf diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md new file mode 100644 index 0000000000..be1ea279ec --- /dev/null +++ b/collectors/python.d.plugin/ovpn_status_log/README.md @@ -0,0 +1,32 @@ +# ovpn_status_log + +Module monitor openvpn-status log file. + +**Requirements:** + + * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files + so that multiple instances do not overwrite each other's output files. 
+ + * Make sure NETDATA USER CAN READ openvpn-status.log + + * Update_every interval MUST MATCH interval on which OpenVPN writes operational status to log file. + +It produces: + +1. **Users** OpenVPN active users + * users + +2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s + * in + * out + +### configuration + +Sample: + +```yaml +default + log_path : '/var/log/openvpn-status.log' +``` + +--- diff --git a/python.d/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py similarity index 100% rename from python.d/ovpn_status_log.chart.py rename to collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py diff --git a/conf.d/python.d/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf similarity index 100% rename from conf.d/python.d/ovpn_status_log.conf rename to collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md new file mode 100644 index 0000000000..66930463fc --- /dev/null +++ b/collectors/python.d.plugin/phpfpm/README.md @@ -0,0 +1,40 @@ +# phpfpm + +This module will monitor one or more php-fpm instances depending on configuration. + +**Requirements:** + * php-fpm with enabled `status` page + * access to `status` page via web server + +It produces following charts: + +1. **Active Connections** + * active + * maxActive + * idle + +2. **Requests** in requests/s + * requests + +3. 
**Performance** + * reached + * slow + +### configuration + +Needs only `url` to server's `status` + +Here is an example for local instance: + +```yaml +update_every : 3 +priority : 90100 + +local: + url : 'http://localhost/status' + retries : 10 +``` + +Without configuration, module attempts to connect to `http://localhost/status` + +--- diff --git a/python.d/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py similarity index 100% rename from python.d/phpfpm.chart.py rename to collectors/python.d.plugin/phpfpm/phpfpm.chart.py diff --git a/conf.d/python.d/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf similarity index 100% rename from conf.d/python.d/phpfpm.conf rename to collectors/python.d.plugin/phpfpm/phpfpm.conf diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md new file mode 100644 index 0000000000..f1338d5768 --- /dev/null +++ b/collectors/python.d.plugin/portcheck/README.md @@ -0,0 +1,35 @@ +# portcheck + +Module monitors a remote TCP service. + +Following charts are drawn per host: + +1. **Latency** ms + * Time required to connect to a TCP port. + Displays latency in 0.1 ms resolution. If the connection failed, the value is missing. + +2. **Status** boolean + * Connection successful + * Could not create socket: possible DNS problems + * Connection refused: port not listening or blocked + * Connection timed out: host or port unreachable + + +### configuration + +```yaml +server: + host: 'dns or ip' # required + port: 22 # required + timeout: 1 # optional + update_every: 1 # optional +``` + +### notes + + * The error chart is intended for alarms, badges or for access via API. + * A system/service/firewall might block netdata's access if a portscan or + similar is detected. + * Currently, the accuracy of the latency is low and should be used as reference only. 
+ +--- diff --git a/python.d/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py similarity index 100% rename from python.d/portcheck.chart.py rename to collectors/python.d.plugin/portcheck/portcheck.chart.py diff --git a/conf.d/python.d/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf similarity index 100% rename from conf.d/python.d/portcheck.conf rename to collectors/python.d.plugin/portcheck/portcheck.conf diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md new file mode 100644 index 0000000000..77c95ff44f --- /dev/null +++ b/collectors/python.d.plugin/postfix/README.md @@ -0,0 +1,15 @@ +# postfix + +Simple module executing `postfix -p` to grab postfix queue. + +It produces only two charts: + +1. **Postfix Queue Emails** + * emails + +2. **Postfix Queue Emails Size** in KB + * size + +Configuration is not needed. + +--- diff --git a/python.d/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py similarity index 100% rename from python.d/postfix.chart.py rename to collectors/python.d.plugin/postfix/postfix.chart.py diff --git a/conf.d/python.d/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf similarity index 100% rename from conf.d/python.d/postfix.conf rename to collectors/python.d.plugin/postfix/postfix.conf diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md new file mode 100644 index 0000000000..e7b108d363 --- /dev/null +++ b/collectors/python.d.plugin/postgres/README.md @@ -0,0 +1,68 @@ +# postgres + +Module monitors one or more postgres servers. + +**Requirements:** + + * `python-psycopg2` package. You have to install it manually. + +Following charts are drawn: + +1. **Database size** MB + * size + +2. **Current Backend Processes** processes + * active + +3. **Write-Ahead Logging Statistics** files/s + * total + * ready + * done + +4. 
**Checkpoints** writes/s + * scheduled + * requested + +5. **Current connections to db** count + * connections + +6. **Tuples returned from db** tuples/s + * sequential + * bitmap + +7. **Tuple reads from db** reads/s + * disk + * cache + +8. **Transactions on db** transactions/s + * committed + * rolled back + +9. **Tuples written to db** writes/s + * inserted + * updated + * deleted + * conflicts + +10. **Locks on db** count per type + * locks + +### configuration + +```yaml +socket: + name : 'socket' + user : 'postgres' + database : 'postgres' + +tcp: + name : 'tcp' + user : 'postgres' + database : 'postgres' + host : 'localhost' + port : 5432 +``` + +When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`. + +--- diff --git a/python.d/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py similarity index 100% rename from python.d/postgres.chart.py rename to collectors/python.d.plugin/postgres/postgres.chart.py diff --git a/conf.d/python.d/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf similarity index 100% rename from conf.d/python.d/postgres.conf rename to collectors/python.d.plugin/postgres/postgres.conf diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md new file mode 100644 index 0000000000..3c4b145e07 --- /dev/null +++ b/collectors/python.d.plugin/powerdns/README.md @@ -0,0 +1,77 @@ +# powerdns + +Module monitor powerdns performance and health metrics. + +Powerdns charts: + +1. **Queries and Answers** + * udp-queries + * udp-answers + * tcp-queries + * tcp-answers + +2. **Cache Usage** + * query-cache-hit + * query-cache-miss + * packetcache-hit + * packetcache-miss + +3. **Cache Size** + * query-cache-size + * packetcache-size + * key-cache-size + * meta-cache-size + +4. **Latency** + * latency + + Powerdns Recursor charts: + + 1. **Questions In** + * questions + * ipv6-questions + * tcp-queries + +2. 
**Questions Out** + * all-outqueries + * ipv6-outqueries + * tcp-outqueries + * throttled-outqueries + +3. **Answer Times** + * answers-slow + * answers0-1 + * answers1-10 + * answers10-100 + * answers100-1000 + +4. **Timeouts** + * outgoing-timeouts + * outgoing4-timeouts + * outgoing6-timeouts + +5. **Drops** + * over-capacity-drops + +6. **Cache Usage** + * cache-hits + * cache-misses + * packetcache-hits + * packetcache-misses + +7. **Cache Size** + * cache-entries + * packetcache-entries + * negcache-entries + +### configuration + +```yaml +local: + name : 'local' + url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics' + header : + X-API-Key: 'change_me' +``` + +--- diff --git a/python.d/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py similarity index 100% rename from python.d/powerdns.chart.py rename to collectors/python.d.plugin/powerdns/powerdns.chart.py diff --git a/conf.d/python.d/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf similarity index 100% rename from conf.d/python.d/powerdns.conf rename to collectors/python.d.plugin/powerdns/powerdns.conf diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md new file mode 100644 index 0000000000..8304c831ef --- /dev/null +++ b/collectors/python.d.plugin/puppet/README.md @@ -0,0 +1,48 @@ +# puppet + +Monitor status of Puppet Server and Puppet DB. + +Following charts are drawn: + +1. **JVM Heap** + * committed (allocated from OS) + * used (actual use) +2. **JVM Non-Heap** + * committed (allocated from OS) + * used (actual use) +3. **CPU Usage** + * execution + * GC (taken by garbage collection) +4. 
**File Descriptors** + * max + * used + + +### configuration + +```yaml +puppetdb: + url: 'https://fqdn.example.com:8081' + tls_cert_file: /path/to/client.crt + tls_key_file: /path/to/client.key + autodetection_retry: 1 + retries: 3600 + +puppetserver: + url: 'https://fqdn.example.com:8140' + autodetection_retry: 1 + retries: 3600 +``` + +When no configuration is given then `https://fqdn.example.com:8140` is +tried without any retries. + +### notes + +* Exact Fully Qualified Domain Name of the node should be used. +* Usually Puppet Server/DB startup time is VERY long. So, there should + be quite reasonable retry count. +* Secure PuppetDB config may require client certificate. Not applies + to default PuppetDB configuration though. + +--- diff --git a/python.d/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py similarity index 100% rename from python.d/puppet.chart.py rename to collectors/python.d.plugin/puppet/puppet.chart.py diff --git a/conf.d/python.d/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf similarity index 100% rename from conf.d/python.d/puppet.conf rename to collectors/python.d.plugin/puppet/puppet.conf diff --git a/conf.d/python.d.conf b/collectors/python.d.plugin/python.d.conf similarity index 100% rename from conf.d/python.d.conf rename to collectors/python.d.plugin/python.d.conf diff --git a/plugins.d/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in similarity index 100% rename from plugins.d/python.d.plugin.in rename to collectors/python.d.plugin/python.d.plugin.in diff --git a/python.d/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py similarity index 100% rename from python.d/python_modules/bases/__init__.py rename to collectors/python.d.plugin/python_modules/__init__.py diff --git a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py similarity index 100% 
rename from python.d/python_modules/bases/FrameworkServices/ExecutableService.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py diff --git a/python.d/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py similarity index 100% rename from python.d/python_modules/bases/FrameworkServices/LogService.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py diff --git a/python.d/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py similarity index 100% rename from python.d/python_modules/bases/FrameworkServices/MySQLService.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py diff --git a/python.d/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py similarity index 100% rename from python.d/python_modules/bases/FrameworkServices/SimpleService.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py diff --git a/python.d/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py similarity index 100% rename from python.d/python_modules/bases/FrameworkServices/SocketService.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py diff --git a/python.d/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py similarity index 100% rename from python.d/python_modules/bases/FrameworkServices/UrlService.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py diff --git a/python.d/python_modules/third_party/__init__.py 
b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py similarity index 100% rename from python.d/python_modules/third_party/__init__.py rename to collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/__init__.py rename to collectors/python.d.plugin/python_modules/bases/__init__.py diff --git a/python.d/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py similarity index 100% rename from python.d/python_modules/bases/charts.py rename to collectors/python.d.plugin/python_modules/bases/charts.py diff --git a/python.d/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py similarity index 100% rename from python.d/python_modules/bases/collection.py rename to collectors/python.d.plugin/python_modules/bases/collection.py diff --git a/python.d/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py similarity index 100% rename from python.d/python_modules/bases/loaders.py rename to collectors/python.d.plugin/python_modules/bases/loaders.py diff --git a/python.d/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py similarity index 100% rename from python.d/python_modules/bases/loggers.py rename to collectors/python.d.plugin/python_modules/bases/loggers.py diff --git a/python.d/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py similarity index 100% rename from python.d/python_modules/pyyaml2/__init__.py rename to collectors/python.d.plugin/python_modules/pyyaml2/__init__.py diff --git a/python.d/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py similarity index 100% rename 
from python.d/python_modules/pyyaml2/composer.py rename to collectors/python.d.plugin/python_modules/pyyaml2/composer.py diff --git a/python.d/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py similarity index 100% rename from python.d/python_modules/pyyaml2/constructor.py rename to collectors/python.d.plugin/python_modules/pyyaml2/constructor.py diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py similarity index 100% rename from python.d/python_modules/pyyaml2/cyaml.py rename to collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py diff --git a/python.d/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py similarity index 100% rename from python.d/python_modules/pyyaml2/dumper.py rename to collectors/python.d.plugin/python_modules/pyyaml2/dumper.py diff --git a/python.d/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py similarity index 100% rename from python.d/python_modules/pyyaml2/emitter.py rename to collectors/python.d.plugin/python_modules/pyyaml2/emitter.py diff --git a/python.d/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py similarity index 100% rename from python.d/python_modules/pyyaml2/error.py rename to collectors/python.d.plugin/python_modules/pyyaml2/error.py diff --git a/python.d/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py similarity index 100% rename from python.d/python_modules/pyyaml2/events.py rename to collectors/python.d.plugin/python_modules/pyyaml2/events.py diff --git a/python.d/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py similarity index 100% rename from python.d/python_modules/pyyaml2/loader.py rename to collectors/python.d.plugin/python_modules/pyyaml2/loader.py diff --git 
a/python.d/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py similarity index 100% rename from python.d/python_modules/pyyaml2/nodes.py rename to collectors/python.d.plugin/python_modules/pyyaml2/nodes.py diff --git a/python.d/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py similarity index 100% rename from python.d/python_modules/pyyaml2/parser.py rename to collectors/python.d.plugin/python_modules/pyyaml2/parser.py diff --git a/python.d/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py similarity index 100% rename from python.d/python_modules/pyyaml2/reader.py rename to collectors/python.d.plugin/python_modules/pyyaml2/reader.py diff --git a/python.d/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py similarity index 100% rename from python.d/python_modules/pyyaml2/representer.py rename to collectors/python.d.plugin/python_modules/pyyaml2/representer.py diff --git a/python.d/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py similarity index 100% rename from python.d/python_modules/pyyaml2/resolver.py rename to collectors/python.d.plugin/python_modules/pyyaml2/resolver.py diff --git a/python.d/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py similarity index 100% rename from python.d/python_modules/pyyaml2/scanner.py rename to collectors/python.d.plugin/python_modules/pyyaml2/scanner.py diff --git a/python.d/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py similarity index 100% rename from python.d/python_modules/pyyaml2/serializer.py rename to collectors/python.d.plugin/python_modules/pyyaml2/serializer.py diff --git a/python.d/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py 
similarity index 100% rename from python.d/python_modules/pyyaml2/tokens.py rename to collectors/python.d.plugin/python_modules/pyyaml2/tokens.py diff --git a/python.d/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py similarity index 100% rename from python.d/python_modules/pyyaml3/__init__.py rename to collectors/python.d.plugin/python_modules/pyyaml3/__init__.py diff --git a/python.d/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py similarity index 100% rename from python.d/python_modules/pyyaml3/composer.py rename to collectors/python.d.plugin/python_modules/pyyaml3/composer.py diff --git a/python.d/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py similarity index 100% rename from python.d/python_modules/pyyaml3/constructor.py rename to collectors/python.d.plugin/python_modules/pyyaml3/constructor.py diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py similarity index 100% rename from python.d/python_modules/pyyaml3/cyaml.py rename to collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py diff --git a/python.d/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py similarity index 100% rename from python.d/python_modules/pyyaml3/dumper.py rename to collectors/python.d.plugin/python_modules/pyyaml3/dumper.py diff --git a/python.d/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py similarity index 100% rename from python.d/python_modules/pyyaml3/emitter.py rename to collectors/python.d.plugin/python_modules/pyyaml3/emitter.py diff --git a/python.d/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py similarity index 100% rename from python.d/python_modules/pyyaml3/error.py rename to 
collectors/python.d.plugin/python_modules/pyyaml3/error.py diff --git a/python.d/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py similarity index 100% rename from python.d/python_modules/pyyaml3/events.py rename to collectors/python.d.plugin/python_modules/pyyaml3/events.py diff --git a/python.d/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py similarity index 100% rename from python.d/python_modules/pyyaml3/loader.py rename to collectors/python.d.plugin/python_modules/pyyaml3/loader.py diff --git a/python.d/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py similarity index 100% rename from python.d/python_modules/pyyaml3/nodes.py rename to collectors/python.d.plugin/python_modules/pyyaml3/nodes.py diff --git a/python.d/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py similarity index 100% rename from python.d/python_modules/pyyaml3/parser.py rename to collectors/python.d.plugin/python_modules/pyyaml3/parser.py diff --git a/python.d/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py similarity index 100% rename from python.d/python_modules/pyyaml3/reader.py rename to collectors/python.d.plugin/python_modules/pyyaml3/reader.py diff --git a/python.d/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py similarity index 100% rename from python.d/python_modules/pyyaml3/representer.py rename to collectors/python.d.plugin/python_modules/pyyaml3/representer.py diff --git a/python.d/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py similarity index 100% rename from python.d/python_modules/pyyaml3/resolver.py rename to collectors/python.d.plugin/python_modules/pyyaml3/resolver.py diff --git a/python.d/python_modules/pyyaml3/scanner.py 
b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py similarity index 100% rename from python.d/python_modules/pyyaml3/scanner.py rename to collectors/python.d.plugin/python_modules/pyyaml3/scanner.py diff --git a/python.d/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py similarity index 100% rename from python.d/python_modules/pyyaml3/serializer.py rename to collectors/python.d.plugin/python_modules/pyyaml3/serializer.py diff --git a/python.d/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py similarity index 100% rename from python.d/python_modules/pyyaml3/tokens.py rename to collectors/python.d.plugin/python_modules/pyyaml3/tokens.py diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/_securetransport/__init__.py rename to collectors/python.d.plugin/python_modules/third_party/__init__.py diff --git a/python.d/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py similarity index 100% rename from python.d/python_modules/third_party/boinc_client.py rename to collectors/python.d.plugin/python_modules/third_party/boinc_client.py diff --git a/python.d/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py similarity index 100% rename from python.d/python_modules/third_party/lm_sensors.py rename to collectors/python.d.plugin/python_modules/third_party/lm_sensors.py diff --git a/python.d/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py similarity index 100% rename from python.d/python_modules/third_party/mcrcon.py rename to collectors/python.d.plugin/python_modules/third_party/mcrcon.py diff --git 
a/python.d/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py similarity index 100% rename from python.d/python_modules/third_party/monotonic.py rename to collectors/python.d.plugin/python_modules/third_party/monotonic.py diff --git a/python.d/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py similarity index 100% rename from python.d/python_modules/third_party/ordereddict.py rename to collectors/python.d.plugin/python_modules/third_party/ordereddict.py diff --git a/python.d/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/__init__.py rename to collectors/python.d.plugin/python_modules/urllib3/__init__.py diff --git a/python.d/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py similarity index 100% rename from python.d/python_modules/urllib3/_collections.py rename to collectors/python.d.plugin/python_modules/urllib3/_collections.py diff --git a/python.d/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py similarity index 100% rename from python.d/python_modules/urllib3/connection.py rename to collectors/python.d.plugin/python_modules/urllib3/connection.py diff --git a/python.d/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py similarity index 100% rename from python.d/python_modules/urllib3/connectionpool.py rename to collectors/python.d.plugin/python_modules/urllib3/connectionpool.py diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/packages/backports/__init__.py rename to 
collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py diff --git a/src/.keep b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py similarity index 100% rename from src/.keep rename to collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/_securetransport/bindings.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/_securetransport/low_level.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/appengine.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/ntlmpool.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/pyopenssl.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py diff --git 
a/python.d/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/securetransport.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py similarity index 100% rename from python.d/python_modules/urllib3/contrib/socks.py rename to collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py diff --git a/python.d/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py similarity index 100% rename from python.d/python_modules/urllib3/exceptions.py rename to collectors/python.d.plugin/python_modules/urllib3/exceptions.py diff --git a/python.d/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py similarity index 100% rename from python.d/python_modules/urllib3/fields.py rename to collectors/python.d.plugin/python_modules/urllib3/fields.py diff --git a/python.d/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py similarity index 100% rename from python.d/python_modules/urllib3/filepost.py rename to collectors/python.d.plugin/python_modules/urllib3/filepost.py diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/packages/__init__.py rename to collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/python.d/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py similarity index 100% rename from python.d/python_modules/urllib3/packages/backports/makefile.py rename to collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py similarity index 100% rename from python.d/python_modules/urllib3/packages/ordered_dict.py rename to collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py diff --git a/python.d/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py similarity index 100% rename from python.d/python_modules/urllib3/packages/six.py rename to collectors/python.d.plugin/python_modules/urllib3/packages/six.py diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py rename to collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py similarity index 100% rename from python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py rename to collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py diff --git a/python.d/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py similarity index 100% rename from python.d/python_modules/urllib3/poolmanager.py rename to 
collectors/python.d.plugin/python_modules/urllib3/poolmanager.py diff --git a/python.d/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py similarity index 100% rename from python.d/python_modules/urllib3/request.py rename to collectors/python.d.plugin/python_modules/urllib3/request.py diff --git a/python.d/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py similarity index 100% rename from python.d/python_modules/urllib3/response.py rename to collectors/python.d.plugin/python_modules/urllib3/response.py diff --git a/python.d/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py similarity index 100% rename from python.d/python_modules/urllib3/util/__init__.py rename to collectors/python.d.plugin/python_modules/urllib3/util/__init__.py diff --git a/python.d/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py similarity index 100% rename from python.d/python_modules/urllib3/util/connection.py rename to collectors/python.d.plugin/python_modules/urllib3/util/connection.py diff --git a/python.d/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py similarity index 100% rename from python.d/python_modules/urllib3/util/request.py rename to collectors/python.d.plugin/python_modules/urllib3/util/request.py diff --git a/python.d/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py similarity index 100% rename from python.d/python_modules/urllib3/util/response.py rename to collectors/python.d.plugin/python_modules/urllib3/util/response.py diff --git a/python.d/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py similarity index 100% rename from python.d/python_modules/urllib3/util/retry.py rename to 
collectors/python.d.plugin/python_modules/urllib3/util/retry.py diff --git a/python.d/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py similarity index 100% rename from python.d/python_modules/urllib3/util/selectors.py rename to collectors/python.d.plugin/python_modules/urllib3/util/selectors.py diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py similarity index 100% rename from python.d/python_modules/urllib3/util/ssl_.py rename to collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py diff --git a/python.d/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py similarity index 100% rename from python.d/python_modules/urllib3/util/timeout.py rename to collectors/python.d.plugin/python_modules/urllib3/util/timeout.py diff --git a/python.d/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py similarity index 100% rename from python.d/python_modules/urllib3/util/url.py rename to collectors/python.d.plugin/python_modules/urllib3/util/url.py diff --git a/python.d/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py similarity index 100% rename from python.d/python_modules/urllib3/util/wait.py rename to collectors/python.d.plugin/python_modules/urllib3/util/wait.py diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md new file mode 100644 index 0000000000..22d367c4d5 --- /dev/null +++ b/collectors/python.d.plugin/rabbitmq/README.md @@ -0,0 +1,56 @@ +# rabbitmq + +Module monitors rabbitmq performance and health metrics. + +Following charts are drawn: + +1. **Queued Messages** + * ready + * unacknowledged + +2. **Message Rates** + * ack + * redelivered + * deliver + * publish + +3. 
**Global Counts** + * channels + * consumers + * connections + * queues + * exchanges + +4. **File Descriptors** + * used descriptors + +5. **Socket Descriptors** + * used descriptors + +6. **Erlang processes** + * used processes + +7. **Erlang run queue** + * Erlang run queue + +8. **Memory** + * free memory in megabytes + +9. **Disk Space** + * free disk space in gigabytes + +### configuration + +```yaml +socket: + name : 'local' + host : '127.0.0.1' + port : 15672 + user : 'guest' + pass : 'guest' + +``` + +When no configuration file is found, module tries to connect to: `localhost:15672`. + +--- diff --git a/python.d/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py similarity index 100% rename from python.d/rabbitmq.chart.py rename to collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py diff --git a/conf.d/python.d/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf similarity index 100% rename from conf.d/python.d/rabbitmq.conf rename to collectors/python.d.plugin/rabbitmq/rabbitmq.conf diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md new file mode 100644 index 0000000000..8d21df0ca9 --- /dev/null +++ b/collectors/python.d.plugin/redis/README.md @@ -0,0 +1,42 @@ +# redis + +Get INFO data from redis instance. + +Following charts are drawn: + +1. **Operations** per second + * operations + +2. **Hit rate** in percent + * rate + +3. **Memory utilization** in kilobytes + * total + * lua + +4. **Database keys** + * lines are created dynamically based on how many databases are there + +5. **Clients** + * connected + * blocked + +6. **Slaves** + * connected + +### configuration + +```yaml +socket: + name : 'local' + socket : '/var/lib/redis/redis.sock' + +localhost: + name : 'local' + host : 'localhost' + port : 6379 +``` + +When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`. 
+ +--- diff --git a/python.d/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py similarity index 100% rename from python.d/redis.chart.py rename to collectors/python.d.plugin/redis/redis.chart.py diff --git a/conf.d/python.d/redis.conf b/collectors/python.d.plugin/redis/redis.conf similarity index 100% rename from conf.d/python.d/redis.conf rename to collectors/python.d.plugin/redis/redis.conf diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md new file mode 100644 index 0000000000..5d357fa499 --- /dev/null +++ b/collectors/python.d.plugin/rethinkdbs/README.md @@ -0,0 +1,34 @@ +# rethinkdbs + +Module monitors rethinkdb health metrics. + +Following charts are drawn: + +1. **Connected Servers** + * connected + * missing + +2. **Active Clients** + * active + +3. **Queries** per second + * queries + +4. **Documents** per second + * documents + +### configuration + +```yaml + +localhost: + name : 'local' + host : '127.0.0.1' + port : 28015 + user : "user" + password : "pass" +``` + +When no configuration file is found, module tries to connect to `127.0.0.1:28015`. 
+ +--- diff --git a/python.d/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py similarity index 100% rename from python.d/rethinkdbs.chart.py rename to collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py diff --git a/conf.d/python.d/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf similarity index 100% rename from conf.d/python.d/rethinkdbs.conf rename to collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md new file mode 100644 index 0000000000..e95095c656 --- /dev/null +++ b/collectors/python.d.plugin/retroshare/README.md @@ -0,0 +1 @@ +# retroshare diff --git a/python.d/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py similarity index 100% rename from python.d/retroshare.chart.py rename to collectors/python.d.plugin/retroshare/retroshare.chart.py diff --git a/conf.d/python.d/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf similarity index 100% rename from conf.d/python.d/retroshare.conf rename to collectors/python.d.plugin/retroshare/retroshare.conf diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md new file mode 100644 index 0000000000..47a5551c06 --- /dev/null +++ b/collectors/python.d.plugin/samba/README.md @@ -0,0 +1,61 @@ +# samba + +Performance metrics of Samba file sharing. + +It produces the following charts: + +1. **Syscall R/Ws** in kilobytes/s + * sendfile + * recvfle + +2. **Smb2 R/Ws** in kilobytes/s + * readout + * writein + * readin + * writeout + +3. **Smb2 Create/Close** in operations/s + * create + * close + +4. **Smb2 Info** in operations/s + * getinfo + * setinfo + +5. **Smb2 Find** in operations/s + * find + +6. **Smb2 Notify** in operations/s + * notify + +7. 
**Smb2 Lesser Ops** as counters + * tcon + * negprot + * tdis + * cancel + * logoff + * flush + * lock + * keepalive + * break + * sessetup + +### configuration + +Requires that smbd has been compiled with profiling enabled. Also required +that `smbd` was started either with the `-P 1` option or inside `smb.conf` +using `smbd profiling level`. + +This plugin uses `smbstatus -P` which can only be executed by root. It uses +sudo and assumes that it is configured such that the `netdata` user can +execute smbstatus as root without password. + +For example: + + netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P + +```yaml +update_every : 5 # update frequency +``` + +--- diff --git a/python.d/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py similarity index 100% rename from python.d/samba.chart.py rename to collectors/python.d.plugin/samba/samba.chart.py diff --git a/conf.d/python.d/samba.conf b/collectors/python.d.plugin/samba/samba.conf similarity index 100% rename from conf.d/python.d/samba.conf rename to collectors/python.d.plugin/samba/samba.conf diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md new file mode 100644 index 0000000000..2ee4fa8f60 --- /dev/null +++ b/collectors/python.d.plugin/sensors/README.md @@ -0,0 +1,17 @@ +# sensors + +System sensors information. + +Charts are created dynamically. + +### configuration + +For detailed configuration information please read [`sensors.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/sensors.conf) file. + +### possible issues + +There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed. +We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827). +Please join this discussion for help. 
+ +--- diff --git a/python.d/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py similarity index 100% rename from python.d/sensors.chart.py rename to collectors/python.d.plugin/sensors/sensors.chart.py diff --git a/conf.d/python.d/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf similarity index 100% rename from conf.d/python.d/sensors.conf rename to collectors/python.d.plugin/sensors/sensors.conf diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md new file mode 100644 index 0000000000..121a635732 --- /dev/null +++ b/collectors/python.d.plugin/smartd_log/README.md @@ -0,0 +1,38 @@ +# smartd_log + +Module monitor `smartd` log files to collect HDD/SSD S.M.A.R.T attributes. + +It produces following charts (you can add additional attributes in the module configuration file): + +1. **Read Error Rate** attribute 1 + +2. **Start/Stop Count** attribute 4 + +3. **Reallocated Sectors Count** attribute 5 + +4. **Seek Error Rate** attribute 7 + +5. **Power-On Hours Count** attribute 9 + +6. **Power Cycle Count** attribute 12 + +7. **Load/Unload Cycles** attribute 193 + +8. **Temperature** attribute 194 + +9. **Current Pending Sectors** attribute 197 + +10. **Off-Line Uncorrectable** attribute 198 + +11. **Write Error Rate** attribute 200 + +### configuration + +```yaml +local: + log_path : '/var/log/smartd/' +``` + +If no configuration is given, module will attempt to read log files in /var/log/smartd/ directory. 
+ +--- diff --git a/python.d/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py similarity index 100% rename from python.d/smartd_log.chart.py rename to collectors/python.d.plugin/smartd_log/smartd_log.chart.py diff --git a/conf.d/python.d/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf similarity index 100% rename from conf.d/python.d/smartd_log.conf rename to collectors/python.d.plugin/smartd_log/smartd_log.conf diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md new file mode 100644 index 0000000000..ae56025870 --- /dev/null +++ b/collectors/python.d.plugin/spigotmc/README.md @@ -0,0 +1,22 @@ +# spigotmc + +This module does some really basic monitoring for Spigot Minecraft servers. + +It provides two charts, one tracking server-side ticks-per-second in +1, 5 and 15 minute averages, and one tracking the number of currently +active users. + +This is not compatible with Spigot plugins which change the format of +the data returned by the `tps` or `list` console commands. + +### configuration + +```yaml +host: localhost +port: 25575 +password: pass +``` + +By default, a connection to port 25575 on the local system is attempted with an empty password. 
+ +--- diff --git a/python.d/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py similarity index 100% rename from python.d/spigotmc.chart.py rename to collectors/python.d.plugin/spigotmc/spigotmc.chart.py diff --git a/conf.d/python.d/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf similarity index 100% rename from conf.d/python.d/spigotmc.conf rename to collectors/python.d.plugin/spigotmc/spigotmc.conf diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md new file mode 100644 index 0000000000..4fba60c1ca --- /dev/null +++ b/collectors/python.d.plugin/springboot/README.md @@ -0,0 +1,129 @@ +# springboot + +This module will monitor one or more Java Spring-boot applications depending on configuration. + +It produces following charts: + +1. **Response Codes** in requests/s + * 1xx + * 2xx + * 3xx + * 4xx + * 5xx + * others + +2. **Threads** + * daemon + * total + +3. **GC Time** in milliseconds and **GC Operations** in operations/s + * Copy + * MarkSweep + * ... + +4. **Heap Mmeory Usage** in KB + * used + * committed + +### configuration + +Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration. + +--- + +# Monitoring Java Spring Boot Applications + +Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in Spring Boot library. + +The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use: +* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies +* set `endpoints.metrics.sensitive=false` in your `application.properties` + +You can create custom Metrics by add and inject a PublicMetrics in your application. 
+This is a example to add custom metrics: +```java +package com.example; + +import org.springframework.boot.actuate.endpoint.PublicMetrics; +import org.springframework.boot.actuate.metrics.Metric; +import org.springframework.stereotype.Service; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryPoolMXBean; +import java.util.ArrayList; +import java.util.Collection; + +@Service +public class HeapPoolMetrics implements PublicMetrics { + + private static final String PREFIX = "mempool."; + private static final String KEY_EDEN = PREFIX + "eden"; + private static final String KEY_SURVIVOR = PREFIX + "survivor"; + private static final String KEY_TENURED = PREFIX + "tenured"; + + @Override + public Collection<Metric<?>> metrics() { + Collection<Metric<?>> result = new ArrayList<>(4); + for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) { + String poolName = mem.getName(); + String name = null; + if (poolName.indexOf("Eden Space") != -1) { + name = KEY_EDEN; + } else if (poolName.indexOf("Survivor Space") != -1) { + name = KEY_SURVIVOR; + } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) { + name = KEY_TENURED; + } + + if (name != null) { + result.add(newMemoryMetric(name, mem.getUsage().getMax())); + result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit())); + result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted())); + result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed())); + } + } + return result; + } + + private Metric<Long> newMemoryMetric(String name, long bytes) { + return new Metric<>(name, bytes / 1024); + } +} +``` + +Please refer [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information. 
+ +## Using netdata springboot module + +The springboot module is enabled by default. It looks up `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot applications by default. You can change it by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system run `/etc/netdata/edit-config python.d/springboot.conf`). + +This module defines some common charts, and you can add custom charts by changing the configuration. + +The configuration format is like: +```yaml +<id>: + name: '<name>' + url: '<metrics endpoint>' # ex. http://localhost:8080/metrics + user: '<username>' # optional + pass: '<password>' # optional + defaults: + [<chart-id>]: true|false + extras: + - id: '<chart-id>' + options: + title: '***' + units: '***' + family: '***' + context: 'springboot.***' + charttype: 'stacked' | 'area' | 'line' + lines: + - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics + - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics +``` + +By default, it creates `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts. +You can disable the default charts by setting `defaults.<chart-id>: false`. + +The dimension name of extras charts should replace `.` with `_`. + +Please check [springboot.conf](https://github.com/netdata/netdata/blob/master/conf.d/python.d/springboot.conf) for more examples. 
\ No newline at end of file diff --git a/python.d/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py similarity index 100% rename from python.d/springboot.chart.py rename to collectors/python.d.plugin/springboot/springboot.chart.py diff --git a/conf.d/python.d/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf similarity index 100% rename from conf.d/python.d/springboot.conf rename to collectors/python.d.plugin/springboot/springboot.conf diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md new file mode 100644 index 0000000000..9c9b62f27b --- /dev/null +++ b/collectors/python.d.plugin/squid/README.md @@ -0,0 +1,38 @@ +# squid + +This module will monitor one or more squid instances depending on configuration. + +It produces following charts: + +1. **Client Bandwidth** in kilobits/s + * in + * out + * hits + +2. **Client Requests** in requests/s + * requests + * hits + * errors + +3. **Server Bandwidth** in kilobits/s + * in + * out + +4. 
**Server Requests** in requests/s + * requests + * errors + +### configuration + +```yaml +priority : 50000 + +local: + request : 'cache_object://localhost:3128/counters' + host : 'localhost' + port : 3128 +``` + +Without any configuration module will try to autodetect where squid presents its `counters` data + +--- diff --git a/python.d/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py similarity index 100% rename from python.d/squid.chart.py rename to collectors/python.d.plugin/squid/squid.chart.py diff --git a/conf.d/python.d/squid.conf b/collectors/python.d.plugin/squid/squid.conf similarity index 100% rename from conf.d/python.d/squid.conf rename to collectors/python.d.plugin/squid/squid.conf diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md new file mode 100644 index 0000000000..e548bd3382 --- /dev/null +++ b/collectors/python.d.plugin/tomcat/README.md @@ -0,0 +1,33 @@ +# tomcat + +Present tomcat containers memory utilization. + +Charts: + +1. **Requests** per second + * accesses + +2. **Volume** in KB/s + * volume + +3. **Threads** + * current + * busy + +4. **JVM Free Memory** in MB + * jvm + +### configuration + +```yaml +localhost: + name : 'local' + url : 'http://127.0.0.1:8080/manager/status?XML=true' + user : 'tomcat_username' + pass : 'secret_tomcat_password' +``` + +Without configuration, module attempts to connect to `http://localhost:8080/manager/status?XML=true`, without any credentials. +So it will probably fail. 
+ +--- diff --git a/python.d/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py similarity index 100% rename from python.d/tomcat.chart.py rename to collectors/python.d.plugin/tomcat/tomcat.chart.py diff --git a/conf.d/python.d/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf similarity index 100% rename from conf.d/python.d/tomcat.conf rename to collectors/python.d.plugin/tomcat/tomcat.conf diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md new file mode 100644 index 0000000000..9b4a182085 --- /dev/null +++ b/collectors/python.d.plugin/traefik/README.md @@ -0,0 +1,54 @@ +# traefik + +Module uses the `health` API to provide statistics. + +It produces: + +1. **Responses** by statuses + * success (1xx, 2xx, 304) + * error (5xx) + * redirect (3xx except 304) + * bad (4xx) + * other (all other responses) + +2. **Responses** by codes + * 2xx (successful) + * 5xx (internal server errors) + * 3xx (redirect) + * 4xx (bad) + * 1xx (informational) + * other (non-standart responses) + +3. **Detailed Response Codes** requests/s (number of responses for each response code family individually) + +4. **Requests**/s + * request statistics + +5. **Total response time** + * sum of all response time + +6. **Average response time** + +7. **Average response time per iteration** + +8. **Uptime** + * Traefik server uptime + +### configuration + +Needs only `url` to server's `health` + +Here is an example for local server: + +```yaml +update_every : 1 +priority : 60000 + +local: + url : 'http://localhost:8080/health' + retries : 10 +``` + +Without configuration, module attempts to connect to `http://localhost:8080/health`. 
+ +--- diff --git a/python.d/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py similarity index 100% rename from python.d/traefik.chart.py rename to collectors/python.d.plugin/traefik/traefik.chart.py diff --git a/conf.d/python.d/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf similarity index 100% rename from conf.d/python.d/traefik.conf rename to collectors/python.d.plugin/traefik/traefik.conf diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md new file mode 100644 index 0000000000..3b4fa16fd5 --- /dev/null +++ b/collectors/python.d.plugin/unbound/README.md @@ -0,0 +1,76 @@ +# unbound + +Monitoring uses the remote control interface to fetch statistics. + +Provides the following charts: + +1. **Queries Processed** + * Ratelimited + * Cache Misses + * Cache Hits + * Expired + * Prefetched + * Recursive + +2. **Request List** + * Average Size + * Max Size + * Overwritten Requests + * Overruns + * Current Size + * User Requests + +3. **Recursion Timings** + * Average recursion processing time + * Median recursion processing time + +If extended stats are enabled, also provides: + +4. **Cache Sizes** + * Message Cache + * RRset Cache + * Infra Cache + * DNSSEC Key Cache + * DNSCrypt Shared Secret Cache + * DNSCrypt Nonce Cache + +### configuration + +Unbound must be manually configured to enable the remote-control protocol. +Check the Unbound documentation for info on how to do this. Additionally, +if you want to take advantage of the autodetection this plugin offers, +you will need to make sure your `unbound.conf` file only uses spaces for +indentation (the default config shipped by most distributions uses tabs +instead of spaces). 
+ +Once you have the Unbound control protocol enabled, you need to make sure +that either the certificate and key are readable by Netdata (if you're +using the regular control interface), or that the socket is accessible +to Netdata (if you're using a UNIX socket for the control interface). + +By default, for the local system, everything can be auto-detected +assuming Unbound is configured correctly and has been told to listen +on the loopback interface or a UNIX socket. This is done by looking +up info in the Unbound config file specified by the `ubconf` key. + +To enable extended stats for a given job, add `extended: yes` to the +definition. + +You can also enable per-thread charts for a given job by adding +`per_thread: yes` to the definition. Note that the number of threads +is only checked on startup. + +A basic local configuration with extended statistics and per-thread +charts looks like this: + +```yaml +local: + ubconf: /etc/unbound/unbound.conf + extended: yes + per_thread: yes +``` + +While it's a bit more complicated to set up correctly, it is recommended +that you use a UNIX socket as it provides far better performance. + +--- diff --git a/python.d/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py similarity index 100% rename from python.d/unbound.chart.py rename to collectors/python.d.plugin/unbound/unbound.chart.py diff --git a/conf.d/python.d/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf similarity index 100% rename from conf.d/python.d/unbound.conf rename to collectors/python.d.plugin/unbound/unbound.conf diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md new file mode 100644 index 0000000000..96c7cafaa2 --- /dev/null +++ b/collectors/python.d.plugin/varnish/README.md @@ -0,0 +1,69 @@ +# varnish + +Module uses the `varnishstat` command to provide varnish cache statistics. + +It produces: + +1. 
**Connections Statistics** in connections/s + * accepted + * dropped + +2. **Client Requests** in requests/s + * received + +3. **All History Hit Rate Ratio** in percent + * hit + * miss + * hitpass + +4. **Current Poll Hit Rate Ratio** in percent + * hit + * miss + * hitpass + +5. **Expired Objects** in expired/s + * objects + +6. **Least Recently Used Nuked Objects** in nuked/s + * objects + + +7. **Number Of Threads In All Pools** in threads + * threads + +8. **Threads Statistics** in threads/s + * created + * failed + * limited + +9. **Current Queue Length** in requests + * in queue + +10. **Backend Connections Statistics** in connections/s + * successful + * unhealthy + * reused + * closed + * recycled + * failed + +11. **Requests To The Backend** in requests/s + * received + +12. **ESI Statistics** in problems/s + * errors + * warnings + +13. **Memory Usage** in MB + * free + * allocated + +14. **Uptime** in seconds + * uptime + + +### configuration + +No configuration is needed. + +--- diff --git a/python.d/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py similarity index 100% rename from python.d/varnish.chart.py rename to collectors/python.d.plugin/varnish/varnish.chart.py diff --git a/conf.d/python.d/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf similarity index 100% rename from conf.d/python.d/varnish.conf rename to collectors/python.d.plugin/varnish/varnish.conf diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md new file mode 100644 index 0000000000..1a01d4522f --- /dev/null +++ b/collectors/python.d.plugin/w1sensor/README.md @@ -0,0 +1,13 @@ +# w1sensor + +Data from 1-Wire sensors. +On Linux these are supported by the wire, w1_gpio, and w1_therm modules. +Currently temperature sensors are supported and automatically detected. + +Charts are created dynamically based on the number of detected sensors. 
+ +### configuration + +For detailed configuration information please read [`w1sensor.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/w1sensor.conf) file. + +--- diff --git a/python.d/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py similarity index 100% rename from python.d/w1sensor.chart.py rename to collectors/python.d.plugin/w1sensor/w1sensor.chart.py diff --git a/conf.d/python.d/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf similarity index 100% rename from conf.d/python.d/w1sensor.conf rename to collectors/python.d.plugin/w1sensor/w1sensor.conf diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md new file mode 100644 index 0000000000..6e8ea1dd5f --- /dev/null +++ b/collectors/python.d.plugin/web_log/README.md @@ -0,0 +1,64 @@ +# web_log + +Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics. + +It produces following charts: + +1. **Response by type** requests/s + * success (1xx, 2xx, 304) + * error (5xx) + * redirect (3xx except 304) + * bad (4xx) + * other (all other responses) + +2. **Response by code family** requests/s + * 1xx (informational) + * 2xx (successful) + * 3xx (redirect) + * 4xx (bad) + * 5xx (internal server errors) + * other (non-standart responses) + * unmatched (the lines in the log file that are not matched) + +3. **Detailed Response Codes** requests/s (number of responses for each response code family individually) + +4. **Bandwidth** KB/s + * received (bandwidth of requests) + * send (bandwidth of responses) + +5. **Timings** ms (request processing time) + * min (bandwidth of requests) + * max (bandwidth of responses) + * average (bandwidth of responses) + +6. **Request per url** requests/s (configured by user) + +7. **Http Methods** requests/s (requests per http method) + +8. **Http Versions** requests/s (requests per http version) + +9. 
**IP protocols** requests/s (requests per ip protocol version) + +10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration) + +11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata) + + +### configuration + +```yaml +nginx_log: + name : 'nginx_log' + path : '/var/log/nginx/access.log' + +apache_log: + name : 'apache_log' + path : '/var/log/apache/other_vhosts_access.log' + categories: + cacti : 'cacti.*' + observium : 'observium' +``` + +Module has preconfigured jobs for nginx, apache and gunicorn on various distros. + +--- diff --git a/python.d/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py similarity index 100% rename from python.d/web_log.chart.py rename to collectors/python.d.plugin/web_log/web_log.chart.py diff --git a/conf.d/python.d/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf similarity index 100% rename from conf.d/python.d/web_log.conf rename to collectors/python.d.plugin/web_log/web_log.conf diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am new file mode 100644 index 0000000000..7566052ef9 --- /dev/null +++ b/collectors/statsd.plugin/Makefile.am @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +statsdconfigdir=$(libconfigdir)/statsd.d +dist_statsdconfig_DATA = \ + example.conf \ + $(NULL) diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md new file mode 100644 index 0000000000..6ef038343a --- /dev/null +++ b/collectors/statsd.plugin/README.md @@ -0,0 +1,523 @@ +# Netdata Statsd + +statsd is a system to collect data from any application. 
Applications are sending metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases. + +There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics to any application framework. This makes statsd quite popular for custom application metrics. + +## netdata statsd + +netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases. + +netdata statsd is inside netdata (an internal plugin, running inside the netdata daemon), it is configured via `netdata.conf` and by-default listens on standard statsd ports (tcp and udp 8125 - yes, netdata statsd server supports both tcp and udp at the same time). + +Since statsd is embedded in netdata, it means you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation. + +netdata statsd is fast. It can collect more than **1.200.000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded, one thread collects metrics, another one updates the charts from the collected data). + +## metrics supported by netdata + +netdata fully supports the statsd protocol. All statsd client libraries can be used with netdata too. + +- **Gauges** + + The application sends `name:value|g`, where `value` is any **decimal/fractional** number, statsd reports the latest value collected and the number of times it was updated (events). 
+ + The application may increment or decrement a previous value, by setting the first character of the value to ` + ` or ` - ` (so, the only way to set a gauge to an absolute negative value, is to first set it to zero). + + Sampling rate is supported (check below). + + When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it. + +- **Counters** and **Meters** + + The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events occurred, statsd reports the **rate** and the number of times it was updated (events). + + `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`. + + For counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`. + + Sampling rate is supported (check below). + + When a counter or meter is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it. + +- **Timers** and **Histograms** + + The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number, statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median** and **standard deviation** and the total number of times it was updated (events). + + For timers use `|ms`, for histograms use `|h`. The only difference between the two, is the `units` of the charts (timers report milliseconds). + + Sampling rate is supported (check below). + + When a timer or histogram is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it. 
+ +- **Sets** + + The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed), statsd reports the number of unique values sent and the number of times it was updated (events). + + Sampling rate is **not** supported for Sets. `value` is always considered text. + + When a set is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it. + +#### Sampling Rates + +The application may append `|@sampling_rate`, where `sampling_rate` is a number from `0.0` to `1.0`, to have statsd extrapolate the value, to predict to total for the whole period. So, if the application reports to statsd a value for 1/10th of the time, it can append `|@0.1` to the metrics it sends to statsd. + +#### Overlapping metrics + +netdata statsd maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently. + +#### Multiple metrics per packet + +netdata accepts multiple metrics per packet if each is terminated with `\n`. + +#### TCP packets + +netdata listens for both TCP and UDP packets. For TCP though, is it important to always append `\n` on each metric. netdata uses this to detect if a metric is split into multiple TCP packets. On disconnect, even the remaining (non terminated with `\n`) buffer, is processed. + +#### UDP packets + +When sending multiple packets over UDP, it is important not to exceed the network MTU (usually 1500 bytes minus a few bytes for the headers). netdata will accept UDP packets up to 9000 bytes, but the underlying network will not exceed MTU. 
+ +## configuration + +This is the statsd configuration at `/etc/netdata/netdata.conf`: + +``` +[statsd] + # enabled = yes + # decimal detail = 1000 + # update every (flushInterval) = 1 + # udp messages to process at once = 10 + # create private charts for metrics matching = * + # max private charts allowed = 200 + # max private charts hard limit = 1000 + # private charts memory mode = save + # private charts history = 3996 + # histograms and timers percentile (percentThreshold) = 95.00000 + # add dimension for number of events received = yes + # gaps on gauges (deleteGauges) = no + # gaps on counters (deleteCounters) = no + # gaps on meters (deleteMeters) = no + # gaps on sets (deleteSets) = no + # gaps on histograms (deleteHistograms) = no + # gaps on timers (deleteTimers) = no + # listen backlog = 4096 + # default port = 8125 + # bind to = udp:localhost:8125 tcp:localhost:8125 +``` + +### statsd main config options +- `enabled = yes|no` + + controls if statsd will be enabled for this netdata. The default is enabled. + +- `default port = 8125` + + controls the port statsd will use. This is the default, since the next line, allows defining ports too. + +- `bind to = udp:localhost tcp:localhost` + + is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be ` * ` (to listen on all IPs) or even a hostname. + +- `update every (flushInterval) = 1` seconds, controls the frequency statsd will push the collected metrics to netdata charts. + +- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. netdata collects metrics using signed 64 bit integers and their fractional detail is controlled using multipliers and divisors. 
This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc). + +The rest of the settings are discussed below. + +## statsd charts + +netdata can visualize statsd collected metrics in 2 ways: + +1. Each metric gets its own **private chart**. This is the default and does not require any configuration (although there are a few options to tweak). + +2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of charts, special configuration is required, to define the chart title, type, units, its dimensions, etc. + +### private metric charts + +Private charts are controlled with `create private charts for metrics matching = *`. This setting accepts a space separated list of simple patterns (use `*` as wildcard, prepend a pattern with `!` for a negative match, the order of patterns is important). + +So to render charts for all `myapp.*` metrics, except `myapp.*.badmetric`, use: + +``` +create private charts for metrics matching = !myapp.*.badmetric myapp.* +``` + +The default is to render private charts for all metrics. + +The `memory mode` of the round robin database and the `history` of private metric charts are controlled with `private charts memory mode` and `private charts history`. The default for both settings is to use the global netdata settings. So, you need to edit them only when you want statsd to use different settings compared to the global ones. + +If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the netdata dashboard (this is a web browser issue we need to address at the netdata UI). So, netdata has a protection to stop creating charts when `max private charts allowed = 200` (soft limit) is reached. 
+ +The metrics above this soft limit are still processed by netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, netdata will still generate charts, but they will automatically be created with `memory mode = none` (netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`. + +Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to chart, it will be sent to backend servers too). + +Example private charts (automatically generated without any configuration): + +#### counters + +- Scope: **count the events of something** (e.g. number of file downloads) +- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c` +- statsd increments the counter by the `INTEGER` number supplied (positive, or negative). + + + +#### gauges + +- Scope: **report the value of something** (e.g. cache memory used by the application server) +- Format: `name:FLOAT|g` +- statsd remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with ` + ` or ` - `. + + + +#### histograms + +- Scope: **statistics on a size of events** (e.g. statistics on the sizes of files downloaded) +- Format: `name:FLOAT|h` +- statsd maintains a list of all the values supplied and provides statistics on them. + + + +The same chart with `sum` unselected, to show the detail of the dimensions supported: + + +#### meters + +This is identical to `counter`. + +- Scope: **count the events of something** (e.g. number of file downloads) +- Format: `name:INTEGER|m` or `name|m` or just `name` +- statsd increments the counter by the `INTEGER` number supplied (positive, or negative). + + + +#### sets + +- Scope: **count the unique occurrences of something** (e.g. 
unique filenames downloaded, or unique users that downloaded files) +- Format: `name:TEXT|s` +- statsd maintains a unique index of all values supplied, and reports the unique entries in it. + + + +#### timers + +- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads) +- Format: `name:FLOAT|ms` +- statsd maintains a list of all the values supplied and provides statistics on them. + + + +The same chart with the `sum` unselected: + + + + +### synthetic statsd charts + +Using synthetic charts, you can create dedicated sections on the dashboard to render the charts. You can control everything: the main menu, the submenus, the charts, the dimensions on each chart, etc. + +Synthetic charts are organized in + +- **applications** (i.e. entries at the main menu of the netdata dashboard) +- **charts for each application** (grouped in families - i.e. submenus at the dashboard menu) +- **statsd metrics for each chart** (i.e. dimensions of the charts) + +For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`. + +So, to create the statsd application `myapp`, you can create the file `/etc/netdata/statsd.d/myapp.conf`, with this content: + +``` +[app] + name = myapp + metrics = myapp.* + private charts = no + gaps when not collected = no + memory mode = ram + history = 60 + +[dictionary] + m1 = metric1 + m2 = metric2 + +# replace 'mychart' with the chart id +# the chart will be named: myapp.mychart +[mychart] + name = mychart + title = my chart title + family = my family + context = chart.context + units = tests/s + priority = 91000 + type = area + dimension = myapp.metric1 m1 + dimension = myapp.metric2 m2 +``` + +Using the above configuration `myapp` should get its own section on the dashboard, having one chart with 2 dimensions. + +`[app]` starts a new application definition. The supported settings in this section are: + +- `name` defines the name of the app. 
+- `metrics` is a netdata simple pattern (space separated patterns, using `*` for wildcard, possibly starting with `!` for negative match). This pattern should match all the possible statsd metrics that will be participating in the application `myapp`. +- `private charts = yes|no`, enables or disables private charts for the metrics matched. +- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, when metrics are not collected. +- `memory mode` sets the memory mode for all charts of the application. The default is the global default for netdata (not the global default for statsd private charts). +- `history` sets the size of the round robin database for this application. The default is the global default for netdata (not the global default for statsd private charts). + +`[dictionary]` defines name-value associations. These are used to rename metrics, when added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary dimension names can be declared globally, for each app and is the only way to rename dimensions when using patterns. Of course the dictionary can be empty or missing. + +Then, you can add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational. + +You can add any number of metrics to a chart, using `dimension` lines. These lines accept 6 space separated parameters: + +1. the metric name, as it is collected (it has to be matched by the `metrics = ` pattern of the app) +2. the dimension name, as it should be shown on the chart +3. an optional selector (type) of the value to be shown (see below) +4. an optional multiplier +5. an optional divider +6. optional flags, space separated and enclosed in quotes. 
All the external plugins `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension, but not show it on the dashboard. This is usually needed to have the values available for percentage calculation, or use them in alarms. + +So, the format is this: +``` +dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS +``` + +`pattern` is a keyword. When set, `METRIC` is expected to be a netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions. + +`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional. + +`TYPE` can be: + +- `events` to show the number of events received by statsd for this metric +- `last` to show the last value, as calculated at the flush interval of the metric (the default) + +Then for histograms and timers the following types are also supported: + +- `min`, show the minimum value +- `max`, show the maximum value +- `sum`, show the sum of all values +- `average` (same as `last`) +- `percentile`, show the 95th percentile (or any other percentile, as configured at statsd global config) +- `median`, show the median of all values (i.e. sort all values and get the middle value) +- `stddev`, show the standard deviation of the values + +#### example synthetic charts + +statsd metrics: `foo` and `bar`. + +Contents of file `/etc/netdata/statsd.d/foobar.conf`: + +``` +[app] + name = foobarapp + metrics = foo bar + private charts = yes + +[foobar_chart1] + title = Hey, foo and bar together + family = foobar_family + context = foobarapp.foobars + units = foobars + type = area + dimension = foo 'foo me' last 1 1 + dimension = bar 'bar me' last 1 1 +``` + +I sent to statsd: `foo:10|g` and `bar:20|g`. 
+ +I got these private charts: + + + +and this synthetic chart: + + + +#### dictionary to name dimensions + +The `[dictionary]` section accepts any number of `name = value` pairs. + +netdata uses this dictionary as follows: + +1. When a `dimension` has a non-empty `NAME`, that name is looked up at the dictionary. + +2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original statsd metric name is looked up at the dictionary. + +3. If any of the above succeeds, netdata uses the `value` of the dictionary, to set the name of the dimension. The dimensions will have as ID the original statsd metric name, and as name, the dictionary value. + +So, you can use the dictionary in 2 ways: + +1. set `dimension = myapp.metric1 ''` and have at the dictionary `myapp.metric1 = metric1 name` +2. set `dimension = myapp.metric1 'm1'` and have at the dictionary `m1 = metric1 name` + +In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms you can use either of the 2 as `${myapp.metric1}` or `${metric1 name}`. + +> keep in mind that if you add multiple times the same statsd metric to a chart, netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add multiple times the same metric with the same `TYPE` to a chart, netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc. + +#### dimension patterns + +netdata allows adding multiple dimensions to a chart, by matching the statsd metrics with a netdata simple pattern. 
+ +Assume we have an API that provides statsd metrics for each response code per method it supports, like these: + +``` +myapp.api.get.200 +myapp.api.get.400 +myapp.api.get.500 +myapp.api.del.200 +myapp.api.del.400 +myapp.api.del.500 +myapp.api.post.200 +myapp.api.post.400 +myapp.api.post.500 +myapp.api.all.200 +myapp.api.all.400 +myapp.api.all.500 +``` + +To add all response codes of `myapp.api.get` to a chart use this: + +``` +[api_get_responses] + ... + dimension = pattern 'myapp.api.get.*' '' last 1 1 +``` + +The above will add dimensions named `200`, `400` and `500` (yes, netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this: + +``` +[dictionary] + get.200 = 200 ok + get.400 = 400 bad request + get.500 = 500 cannot connect to db + +[api_get_responses] + ... + dimension = pattern 'myapp.api.get.*' 'get.' last 1 1 +``` + +Note that we added a `NAME` to the dimension line with `get.`. This is prefixed to the wildcarded part of the metric name, to compose the key for looking up the dictionary. So `500` became `get.500` which was looked up to the dictionary to find value `500 cannot connect to db`. This way we can have different dimension names, for each of the API methods (i.e. `get.500 = 500 cannot connect to db` while `post.500 = 500 cannot write to disk`). + +To add all API methods to a chart, do this: + +``` +[ok_by_method] + ... + dimension = pattern 'myapp.api.*.200' '' last 1 1 +``` + +The above will add `get`, `post`, `del` and `all` to the chart. + +If `all` is not wanted (a `stacked` chart does not need the `all` dimension, since the sum of the dimensions provides the total), the line should be: + +``` +[ok_by_method] + ... + dimension = pattern '!myapp.api.all.* myapp.api.*.200' '' last 1 1 +``` + +With the above, all methods except `all` will be added to the chart. 
+ +To automatically rename the methods, use this: + +``` +[dictionary] + method.get = GET + method.post = ADD + method.del = DELETE + +[ok_by_method] + ... + dimension = pattern '!myapp.api.all.* myapp.api.*.200' 'method.' last 1 1 +``` + +Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`. + + +## interpolation + +~~If you send just one value to statsd, you will notice that the chart is created but no value is shown. The reason is that netdata interpolates all values at second boundaries. For incremental values (`counters` and `meters` in statsd terminology), if you send 10 at 00:00:00.500, 20 at 00:00:01.500 and 30 at 00:00:02.500, netdata will show 15 at 00:00:01 and 25 at 00:00:02.~~ + +~~This interpolation is automatic and global in netdata for all charts, for incremental values. This means that for the chart to start showing values you need to send 2 values across 2 flush intervals.~~ + +~~(although this is required for incremental values, netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all netdata dimensions).~~ + +(statsd metrics do not lose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411)) + +## sending statsd metrics from shell scripts + +You can send/update statsd metrics from shell scripts. You can use this feature, to visualize in netdata automated jobs you run on your servers. + +The command you need to run is: + +```sh +echo "NAME:VALUE|TYPE" | nc -u --send-only localhost 8125 +``` + +Where: + +- `NAME` is the metric name +- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything) +- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type. 
+ +So, to set `metric1` as gauge to value `10`, use: + +```sh +echo "metric1:10|g" | nc -u --send-only localhost 8125 +``` + +To increment `metric2` by `10`, as a counter, use: + +```sh +echo "metric2:10|c" | nc -u --send-only localhost 8125 +``` + +You can send multiple metrics like this: + +```sh +# send multiple metrics via UDP +printf "metric1:10|g\nmetric2:10|c\n" | nc -u --send-only localhost 8125 +``` + +Remember, for UDP communication each packet should not exceed the MTU. So, if you plan to push too many metrics at once, prefer TCP communication: + +```sh +# send multiple metrics via TCP +printf "metric1:10|g\nmetric2:10|c\n" | nc --send-only localhost 8125 +``` + +You can also use this little function to take care of all the details: + +```sh +#!/usr/bin/env bash + +STATSD_HOST="localhost" +STATSD_PORT="8125" +statsd() { + local udp="-u" all="${*}" + + # if the string length of all parameters given is above 1000, use TCP + [ "${#all}" -gt 1000 ] && udp= + + while [ ! -z "${1}" ] + do + printf "${1}\n" + shift + done | nc ${udp} --send-only ${STATSD_HOST} ${STATSD_PORT} || return 1 + + return 0 +} +``` + +You can use it like this: + +```sh +# first, source it in your script +source statsd.sh + +# then, at any point: +statsd "metric1:10|g" "metric2:10|c" ... +``` + +The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes. diff --git a/conf.d/statsd.d/example.conf b/collectors/statsd.plugin/example.conf similarity index 95% rename from conf.d/statsd.d/example.conf rename to collectors/statsd.plugin/example.conf index f7c12b4ab3..2c7de6c7bd 100644 --- a/conf.d/statsd.d/example.conf +++ b/collectors/statsd.plugin/example.conf @@ -1,6 +1,7 @@ # statsd synthetic charts configuration -# You can add many .conf files, one for each of your apps +# You can add many .conf files in /etc/netdata/statsd.d/, +# one for each of your apps. 
# start a new app - you can add many apps in the same file [app] @@ -26,8 +27,6 @@ # the default is to use the global history #history = 3600 - - # create a chart # this is its id - the chart will be named myexampleapp.myexamplechart [myexamplechart] diff --git a/src/plugins/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c similarity index 100% rename from src/plugins/statsd.plugin/statsd.c rename to collectors/statsd.plugin/statsd.c diff --git a/src/plugins/statsd.plugin/statsd.h b/collectors/statsd.plugin/statsd.h similarity index 93% rename from src/plugins/statsd.plugin/statsd.h rename to collectors/statsd.plugin/statsd.h index 84de45b9d8..b741be76d4 100644 --- a/src/plugins/statsd.plugin/statsd.h +++ b/collectors/statsd.plugin/statsd.h @@ -3,7 +3,7 @@ #ifndef NETDATA_STATSD_H #define NETDATA_STATSD_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #define STATSD_LISTEN_PORT 8125 #define STATSD_LISTEN_BACKLOG 4096 diff --git a/collectors/tc.plugin/Makefile.am b/collectors/tc.plugin/Makefile.am new file mode 100644 index 0000000000..f77e67d915 --- /dev/null +++ b/collectors/tc.plugin/Makefile.am @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + tc-qos-helper.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_plugins_SCRIPTS = \ + tc-qos-helper.sh \ + $(NULL) + +dist_noinst_DATA = \ + tc-qos-helper.sh.in \ + README.md \ + $(NULL) diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md new file mode 100644 index 0000000000..d0e5f9c4c9 --- /dev/null +++ b/collectors/tc.plugin/README.md @@ -0,0 +1,9 @@ +## tc.plugin + +Netdata monitors `tc` QoS classes for all interfaces. + +If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/)) it will collect interface and class names. 
+ +There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output). + +The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates. diff --git a/src/plugins/linux-tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c similarity index 100% rename from src/plugins/linux-tc.plugin/plugin_tc.c rename to collectors/tc.plugin/plugin_tc.c diff --git a/src/plugins/linux-tc.plugin/plugin_tc.h b/collectors/tc.plugin/plugin_tc.h similarity index 94% rename from src/plugins/linux-tc.plugin/plugin_tc.h rename to collectors/tc.plugin/plugin_tc.h index 48f2dba322..c646584152 100644 --- a/src/plugins/linux-tc.plugin/plugin_tc.h +++ b/collectors/tc.plugin/plugin_tc.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_TC_H #define NETDATA_PLUGIN_TC_H 1 -#include "../../common.h" +#include "../../daemon/common.h" #if (TARGET_OS == OS_LINUX) diff --git a/plugins.d/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in similarity index 100% rename from plugins.d/tc-qos-helper.sh.in rename to collectors/tc.plugin/tc-qos-helper.sh.in diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am deleted file mode 100644 index 3ed7e1597a..0000000000 --- a/conf.d/Makefile.am +++ /dev/null @@ -1,199 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# SPDX-License-Identifier: GPL-3.0-or-later -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in -CLEANFILES = \ - edit-config \ - $(NULL) - -include $(top_srcdir)/build/subst.inc - -SUFFIXES = .in - -dist_config_SCRIPTS = \ - edit-config \ - $(NULL) - -dist_noinst_DATA = \ - edit-config.in \ - $(NULL) - -dist_libconfig_DATA = \ - apps_groups.conf \ - charts.d.conf \ - fping.conf \ - node.d.conf \ - python.d.conf \ - health_alarm_notify.conf \ - health_email_recipients.conf \ - 
stream.conf \ - $(NULL) - -nodeconfigdir=$(libconfigdir)/node.d -dist_nodeconfig_DATA = \ - $(NULL) - -usernodeconfigdir=$(configdir)/node.d -dist_usernodeconfig_DATA = \ - node.d/README.md \ - node.d/fronius.conf.md \ - node.d/named.conf.md \ - node.d/sma_webbox.conf.md \ - node.d/snmp.conf.md \ - node.d/stiebeleltron.conf.md \ - $(NULL) - -pythonconfigdir=$(libconfigdir)/python.d -dist_pythonconfig_DATA = \ - python.d/apache.conf \ - python.d/beanstalk.conf \ - python.d/bind_rndc.conf \ - python.d/boinc.conf \ - python.d/ceph.conf \ - python.d/chrony.conf \ - python.d/couchdb.conf \ - python.d/cpuidle.conf \ - python.d/cpufreq.conf \ - python.d/dns_query_time.conf \ - python.d/dnsdist.conf \ - python.d/dockerd.conf \ - python.d/dovecot.conf \ - python.d/elasticsearch.conf \ - python.d/example.conf \ - python.d/exim.conf \ - python.d/fail2ban.conf \ - python.d/freeradius.conf \ - python.d/go_expvar.conf \ - python.d/haproxy.conf \ - python.d/hddtemp.conf \ - python.d/httpcheck.conf \ - python.d/icecast.conf \ - python.d/ipfs.conf \ - python.d/isc_dhcpd.conf \ - python.d/linux_power_supply.conf \ - python.d/litespeed.conf \ - python.d/logind.conf \ - python.d/mdstat.conf \ - python.d/megacli.conf \ - python.d/memcached.conf \ - python.d/mongodb.conf \ - python.d/monit.conf \ - python.d/mysql.conf \ - python.d/nginx.conf \ - python.d/nginx_plus.conf \ - python.d/nsd.conf \ - python.d/ntpd.conf \ - python.d/ovpn_status_log.conf \ - python.d/phpfpm.conf \ - python.d/portcheck.conf \ - python.d/postfix.conf \ - python.d/postgres.conf \ - python.d/powerdns.conf \ - python.d/puppet.conf \ - python.d/rabbitmq.conf \ - python.d/redis.conf \ - python.d/rethinkdbs.conf \ - python.d/retroshare.conf \ - python.d/samba.conf \ - python.d/sensors.conf \ - python.d/springboot.conf \ - python.d/spigotmc.conf \ - python.d/squid.conf \ - python.d/smartd_log.conf \ - python.d/tomcat.conf \ - python.d/traefik.conf \ - python.d/unbound.conf \ - python.d/varnish.conf \ - 
python.d/w1sensor.conf \ - python.d/web_log.conf \ - $(NULL) - -healthconfigdir=$(libconfigdir)/health.d -dist_healthconfig_DATA = \ - health.d/apache.conf \ - health.d/apcupsd.conf \ - health.d/backend.conf \ - health.d/bcache.conf \ - health.d/beanstalkd.conf \ - health.d/bind_rndc.conf \ - health.d/boinc.conf \ - health.d/btrfs.conf \ - health.d/ceph.conf \ - health.d/cpu.conf \ - health.d/couchdb.conf \ - health.d/disks.conf \ - health.d/dockerd.conf \ - health.d/elasticsearch.conf \ - health.d/entropy.conf \ - health.d/fping.conf \ - health.d/fronius.conf \ - health.d/haproxy.conf \ - health.d/httpcheck.conf \ - health.d/ipc.conf \ - health.d/ipfs.conf \ - health.d/ipmi.conf \ - health.d/isc_dhcpd.conf \ - health.d/lighttpd.conf \ - health.d/linux_power_supply.conf \ - health.d/load.conf \ - health.d/mdstat.conf \ - health.d/megacli.conf \ - health.d/memcached.conf \ - health.d/memory.conf \ - health.d/mongodb.conf \ - health.d/mysql.conf \ - health.d/named.conf \ - health.d/net.conf \ - health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ - health.d/portcheck.conf \ - health.d/postgres.conf \ - health.d/qos.conf \ - health.d/ram.conf \ - health.d/redis.conf \ - health.d/retroshare.conf \ - health.d/softnet.conf \ - health.d/squid.conf \ - health.d/stiebeleltron.conf \ - health.d/swap.conf \ - health.d/tcp_conn.conf \ - health.d/tcp_listen.conf \ - health.d/tcp_mem.conf \ - health.d/tcp_orphans.conf \ - health.d/tcp_resets.conf \ - health.d/udp_errors.conf \ - health.d/varnish.conf \ - health.d/web_log.conf \ - health.d/zfs.conf \ - $(NULL) - -chartsconfigdir=$(libconfigdir)/charts.d -dist_chartsconfig_DATA = \ - charts.d/apache.conf \ - charts.d/apcupsd.conf \ - charts.d/cpufreq.conf \ - charts.d/exim.conf \ - charts.d/libreswan.conf \ - charts.d/load_average.conf \ - charts.d/mysql.conf \ - charts.d/nut.conf \ - charts.d/phpfpm.conf \ - charts.d/sensors.conf \ - charts.d/tomcat.conf \ - charts.d/ap.conf \ - charts.d/cpu_apps.conf \ 
- charts.d/example.conf \ - charts.d/hddtemp.conf \ - charts.d/mem_apps.conf \ - charts.d/nginx.conf \ - charts.d/opensips.conf \ - charts.d/postfix.conf \ - charts.d/squid.conf \ - $(NULL) - -statsdconfigdir=$(libconfigdir)/statsd.d -dist_statsdconfig_DATA = \ - statsd.d/example.conf \ - $(NULL) diff --git a/conf.d/node.d/README.md b/conf.d/node.d/README.md deleted file mode 100644 index 45e3d02a6b..0000000000 --- a/conf.d/node.d/README.md +++ /dev/null @@ -1,7 +0,0 @@ -`node.d.plugin` modules accept configuration in JSON format. - -Unfortunately, JSON files do not accept comments. So, the best way to describe them is to have markdown text files with instructions. - -JSON has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain configuration file cannot be loaded, we suggest to verify it at [http://jsonlint.com/](http://jsonlint.com/). - -The files in this directory, provide usable examples for configuring each `node.d.plugin` module. diff --git a/configure.ac b/configure.ac index c43b7eec03..b4e408a84f 100644 --- a/configure.ac +++ b/configure.ac @@ -36,7 +36,7 @@ AC_SUBST([PACKAGE_RPM_RELEASE]) AC_CONFIG_AUX_DIR([.]) AC_CONFIG_HEADERS([config.h]) AC_CONFIG_MACRO_DIR([build/m4]) -AC_CONFIG_SRCDIR([src/main.c]) +AC_CONFIG_SRCDIR([daemon/main.c]) define([AUTOMATE_INIT_OPTIONS], [tar-pax subdir-objects]) m4_ifdef([AM_SILENT_RULES], [ define([AUTOMATE_INIT_OPTIONS], [tar-pax silent-rules subdir-objects]) @@ -548,45 +548,65 @@ AC_SUBST([OPTIONAL_IPMIMONITORING_LIBS]) AC_CONFIG_FILES([ Makefile - charts.d/Makefile - conf.d/Makefile netdata.spec - python.d/Makefile - node.d/Makefile - plugins.d/Makefile - src/api/Makefile - src/backends/graphite/Makefile - src/backends/json/Makefile - src/backends/Makefile - src/backends/opentsdb/Makefile - src/backends/prometheus/Makefile - src/database/Makefile - src/health/Makefile - src/libnetdata/Makefile - src/Makefile - src/plugins/apps.plugin/Makefile - 
src/plugins/checks.plugin/Makefile - src/plugins/freebsd.plugin/Makefile - src/plugins/idlejitter.plugin/Makefile - src/plugins/linux-cgroups.plugin/Makefile - src/plugins/linux-diskspace.plugin/Makefile - src/plugins/linux-freeipmi.plugin/Makefile - src/plugins/linux-nfacct.plugin/Makefile - src/plugins/linux-proc.plugin/Makefile - src/plugins/linux-tc.plugin/Makefile - src/plugins/macos.plugin/Makefile - src/plugins/Makefile - src/plugins/plugins.d.plugin/Makefile - src/plugins/statsd.plugin/Makefile - src/registry/Makefile - src/streaming/Makefile - src/webserver/Makefile - system/Makefile - web/Makefile - diagrams/Makefile - makeself/Makefile + backends/graphite/Makefile + backends/json/Makefile + backends/Makefile + backends/opentsdb/Makefile + backends/prometheus/Makefile + collectors/Makefile + collectors/apps.plugin/Makefile + collectors/cgroups.plugin/Makefile + collectors/charts.d.plugin/Makefile + collectors/checks.plugin/Makefile + collectors/diskspace.plugin/Makefile + collectors/fping.plugin/Makefile + collectors/freebsd.plugin/Makefile + collectors/freeipmi.plugin/Makefile + collectors/idlejitter.plugin/Makefile + collectors/macos.plugin/Makefile + collectors/nfacct.plugin/Makefile + collectors/node.d.plugin/Makefile + collectors/plugins.d/Makefile + collectors/proc.plugin/Makefile + collectors/python.d.plugin/Makefile + collectors/statsd.plugin/Makefile + collectors/tc.plugin/Makefile contrib/Makefile + daemon/Makefile + database/Makefile + diagrams/Makefile + health/Makefile + libnetdata/Makefile + libnetdata/adaptive_resortable_list/Makefile + libnetdata/avl/Makefile + libnetdata/buffer/Makefile + libnetdata/clocks/Makefile + libnetdata/config/Makefile + libnetdata/dictionary/Makefile + libnetdata/eval/Makefile + libnetdata/locks/Makefile + libnetdata/log/Makefile + libnetdata/popen/Makefile + libnetdata/procfile/Makefile + libnetdata/simple_pattern/Makefile + libnetdata/socket/Makefile + libnetdata/statistical/Makefile + 
libnetdata/storage_number/Makefile + libnetdata/threads/Makefile + libnetdata/url/Makefile + makeself/Makefile + registry/Makefile + streaming/Makefile + system/Makefile tests/Makefile + web/Makefile + web/api/Makefile + web/gui/Makefile + web/server/Makefile + web/server/single/Makefile + web/server/multi/Makefile + web/server/static/Makefile ]) AC_OUTPUT diff --git a/contrib/Makefile.am b/contrib/Makefile.am index 8a94677d7b..80d80d3718 100644 --- a/contrib/Makefile.am +++ b/contrib/Makefile.am @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-3.0-or-later -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ README.md \ @@ -23,7 +24,6 @@ dist_noinst_DATA = \ dist_noinst_SCRIPTS = \ debian/netdata.init \ - nc-backend.sh \ $(NULL) debian/changelog: diff --git a/daemon/Makefile.am b/daemon/Makefile.am new file mode 100644 index 0000000000..bdd02774cd --- /dev/null +++ b/daemon/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/daemon/README.md b/daemon/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/common.c b/daemon/common.c similarity index 100% rename from src/common.c rename to daemon/common.c diff --git a/src/common.h b/daemon/common.h similarity index 89% rename from src/common.h rename to daemon/common.h index b513d59017..b313fdfdd0 100644 --- a/src/common.h +++ b/daemon/common.h @@ -3,7 +3,7 @@ #ifndef NETDATA_COMMON_H #define NETDATA_COMMON_H 1 -#include "libnetdata/libnetdata.h" +#include "../libnetdata/libnetdata.h" // ---------------------------------------------------------------------------- // netdata include files @@ -14,7 +14,7 @@ #include "database/rrd.h" // the netdata webserver(s) -#include "webserver/web_server.h" +#include "web/server/web_server.h" // streaming metrics between netdata servers 
#include "streaming/rrdpush.h" @@ -27,13 +27,13 @@ #include "registry/registry.h" // backends for archiving the metrics -#include "src/backends/backends.h" +#include "backends/backends.h" // the netdata API -#include "api/web_api_v1.h" +#include "web/api/web_api_v1.h" // all data collection plugins -#include "plugins/all.h" +#include "collectors/all.h" // netdata unit tests #include "unit_test.h" diff --git a/src/daemon.c b/daemon/daemon.c similarity index 100% rename from src/daemon.c rename to daemon/daemon.c diff --git a/src/daemon.h b/daemon/daemon.h similarity index 100% rename from src/daemon.h rename to daemon/daemon.h diff --git a/src/global_statistics.c b/daemon/global_statistics.c similarity index 100% rename from src/global_statistics.c rename to daemon/global_statistics.c diff --git a/src/global_statistics.h b/daemon/global_statistics.h similarity index 100% rename from src/global_statistics.h rename to daemon/global_statistics.h diff --git a/src/main.c b/daemon/main.c similarity index 100% rename from src/main.c rename to daemon/main.c diff --git a/src/main.h b/daemon/main.h similarity index 100% rename from src/main.h rename to daemon/main.h diff --git a/src/signals.c b/daemon/signals.c similarity index 100% rename from src/signals.c rename to daemon/signals.c diff --git a/src/signals.h b/daemon/signals.h similarity index 100% rename from src/signals.h rename to daemon/signals.h diff --git a/src/unit_test.c b/daemon/unit_test.c similarity index 100% rename from src/unit_test.c rename to daemon/unit_test.c diff --git a/src/unit_test.h b/daemon/unit_test.h similarity index 100% rename from src/unit_test.h rename to daemon/unit_test.h diff --git a/database/Makefile.am b/database/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/database/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + 
README.md \ + $(NULL) diff --git a/database/README.md b/database/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/database/rrd.c b/database/rrd.c similarity index 100% rename from src/database/rrd.c rename to database/rrd.c diff --git a/src/database/rrd.h b/database/rrd.h similarity index 99% rename from src/database/rrd.h rename to database/rrd.h index 51f826c4ba..57d94c4c86 100644 --- a/src/database/rrd.h +++ b/database/rrd.h @@ -14,7 +14,7 @@ typedef struct rrdcalc RRDCALC; typedef struct rrdcalctemplate RRDCALCTEMPLATE; typedef struct alarm_entry ALARM_ENTRY; -#include "../common.h" +#include "../daemon/common.h" #include "rrdvar.h" #include "rrdsetvar.h" diff --git a/src/database/rrdcalc.c b/database/rrdcalc.c similarity index 100% rename from src/database/rrdcalc.c rename to database/rrdcalc.c diff --git a/src/database/rrdcalc.h b/database/rrdcalc.h similarity index 100% rename from src/database/rrdcalc.h rename to database/rrdcalc.h diff --git a/src/database/rrdcalctemplate.c b/database/rrdcalctemplate.c similarity index 100% rename from src/database/rrdcalctemplate.c rename to database/rrdcalctemplate.c diff --git a/src/database/rrdcalctemplate.h b/database/rrdcalctemplate.h similarity index 100% rename from src/database/rrdcalctemplate.h rename to database/rrdcalctemplate.h diff --git a/src/database/rrddim.c b/database/rrddim.c similarity index 100% rename from src/database/rrddim.c rename to database/rrddim.c diff --git a/src/database/rrddimvar.c b/database/rrddimvar.c similarity index 100% rename from src/database/rrddimvar.c rename to database/rrddimvar.c diff --git a/src/database/rrddimvar.h b/database/rrddimvar.h similarity index 100% rename from src/database/rrddimvar.h rename to database/rrddimvar.h diff --git a/src/database/rrdfamily.c b/database/rrdfamily.c similarity index 100% rename from src/database/rrdfamily.c rename to database/rrdfamily.c diff --git a/src/database/rrdhost.c b/database/rrdhost.c similarity index 
100% rename from src/database/rrdhost.c rename to database/rrdhost.c diff --git a/src/database/rrdset.c b/database/rrdset.c similarity index 100% rename from src/database/rrdset.c rename to database/rrdset.c diff --git a/src/database/rrdsetvar.c b/database/rrdsetvar.c similarity index 100% rename from src/database/rrdsetvar.c rename to database/rrdsetvar.c diff --git a/src/database/rrdsetvar.h b/database/rrdsetvar.h similarity index 100% rename from src/database/rrdsetvar.h rename to database/rrdsetvar.h diff --git a/src/database/rrdvar.c b/database/rrdvar.c similarity index 100% rename from src/database/rrdvar.c rename to database/rrdvar.c diff --git a/src/database/rrdvar.h b/database/rrdvar.h similarity index 100% rename from src/database/rrdvar.h rename to database/rrdvar.h diff --git a/health/Makefile.am b/health/Makefile.am new file mode 100644 index 0000000000..6f09b2e25f --- /dev/null +++ b/health/Makefile.am @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +CLEANFILES = \ + alarm-notify.sh \ + $(NULL) + +include $(top_srcdir)/build/subst.inc +SUFFIXES = .in + +dist_libconfig_DATA = \ + health_alarm_notify.conf \ + health_email_recipients.conf \ + $(NULL) + +dist_plugins_SCRIPTS = \ + alarm-notify.sh \ + alarm-email.sh \ + alarm-test.sh \ + $(NULL) + +dist_noinst_DATA = \ + alarm-notify.sh.in \ + README.md \ + $(NULL) + +healthconfigdir=$(libconfigdir)/health.d +dist_healthconfig_DATA = \ + health.d/apache.conf \ + health.d/apcupsd.conf \ + health.d/backend.conf \ + health.d/bcache.conf \ + health.d/beanstalkd.conf \ + health.d/bind_rndc.conf \ + health.d/boinc.conf \ + health.d/btrfs.conf \ + health.d/ceph.conf \ + health.d/cpu.conf \ + health.d/couchdb.conf \ + health.d/disks.conf \ + health.d/dockerd.conf \ + health.d/elasticsearch.conf \ + health.d/entropy.conf \ + health.d/fping.conf \ + health.d/fronius.conf \ + health.d/haproxy.conf \ + 
health.d/httpcheck.conf \ + health.d/ipc.conf \ + health.d/ipfs.conf \ + health.d/ipmi.conf \ + health.d/isc_dhcpd.conf \ + health.d/lighttpd.conf \ + health.d/linux_power_supply.conf \ + health.d/load.conf \ + health.d/mdstat.conf \ + health.d/megacli.conf \ + health.d/memcached.conf \ + health.d/memory.conf \ + health.d/mongodb.conf \ + health.d/mysql.conf \ + health.d/named.conf \ + health.d/net.conf \ + health.d/netfilter.conf \ + health.d/nginx.conf \ + health.d/nginx_plus.conf \ + health.d/portcheck.conf \ + health.d/postgres.conf \ + health.d/qos.conf \ + health.d/ram.conf \ + health.d/redis.conf \ + health.d/retroshare.conf \ + health.d/softnet.conf \ + health.d/squid.conf \ + health.d/stiebeleltron.conf \ + health.d/swap.conf \ + health.d/tcp_conn.conf \ + health.d/tcp_listen.conf \ + health.d/tcp_mem.conf \ + health.d/tcp_orphans.conf \ + health.d/tcp_resets.conf \ + health.d/udp_errors.conf \ + health.d/varnish.conf \ + health.d/web_log.conf \ + health.d/zfs.conf \ + $(NULL) diff --git a/health/README.md b/health/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins.d/alarm-email.sh b/health/alarm-email.sh similarity index 100% rename from plugins.d/alarm-email.sh rename to health/alarm-email.sh diff --git a/plugins.d/alarm-notify.sh.in b/health/alarm-notify.sh.in similarity index 100% rename from plugins.d/alarm-notify.sh.in rename to health/alarm-notify.sh.in diff --git a/plugins.d/alarm-test.sh b/health/alarm-test.sh similarity index 100% rename from plugins.d/alarm-test.sh rename to health/alarm-test.sh diff --git a/src/health/health.c b/health/health.c similarity index 100% rename from src/health/health.c rename to health/health.c diff --git a/conf.d/health.d/apache.conf b/health/health.d/apache.conf similarity index 100% rename from conf.d/health.d/apache.conf rename to health/health.d/apache.conf diff --git a/conf.d/health.d/apcupsd.conf b/health/health.d/apcupsd.conf similarity index 100% rename from 
conf.d/health.d/apcupsd.conf rename to health/health.d/apcupsd.conf diff --git a/conf.d/health.d/backend.conf b/health/health.d/backend.conf similarity index 100% rename from conf.d/health.d/backend.conf rename to health/health.d/backend.conf diff --git a/conf.d/health.d/bcache.conf b/health/health.d/bcache.conf similarity index 100% rename from conf.d/health.d/bcache.conf rename to health/health.d/bcache.conf diff --git a/conf.d/health.d/beanstalkd.conf b/health/health.d/beanstalkd.conf similarity index 100% rename from conf.d/health.d/beanstalkd.conf rename to health/health.d/beanstalkd.conf diff --git a/conf.d/health.d/bind_rndc.conf b/health/health.d/bind_rndc.conf similarity index 100% rename from conf.d/health.d/bind_rndc.conf rename to health/health.d/bind_rndc.conf diff --git a/conf.d/health.d/boinc.conf b/health/health.d/boinc.conf similarity index 100% rename from conf.d/health.d/boinc.conf rename to health/health.d/boinc.conf diff --git a/conf.d/health.d/btrfs.conf b/health/health.d/btrfs.conf similarity index 100% rename from conf.d/health.d/btrfs.conf rename to health/health.d/btrfs.conf diff --git a/conf.d/health.d/ceph.conf b/health/health.d/ceph.conf similarity index 100% rename from conf.d/health.d/ceph.conf rename to health/health.d/ceph.conf diff --git a/conf.d/health.d/couchdb.conf b/health/health.d/couchdb.conf similarity index 100% rename from conf.d/health.d/couchdb.conf rename to health/health.d/couchdb.conf diff --git a/conf.d/health.d/cpu.conf b/health/health.d/cpu.conf similarity index 100% rename from conf.d/health.d/cpu.conf rename to health/health.d/cpu.conf diff --git a/conf.d/health.d/disks.conf b/health/health.d/disks.conf similarity index 100% rename from conf.d/health.d/disks.conf rename to health/health.d/disks.conf diff --git a/conf.d/health.d/dockerd.conf b/health/health.d/dockerd.conf similarity index 100% rename from conf.d/health.d/dockerd.conf rename to health/health.d/dockerd.conf diff --git 
a/conf.d/health.d/elasticsearch.conf b/health/health.d/elasticsearch.conf similarity index 100% rename from conf.d/health.d/elasticsearch.conf rename to health/health.d/elasticsearch.conf diff --git a/conf.d/health.d/entropy.conf b/health/health.d/entropy.conf similarity index 100% rename from conf.d/health.d/entropy.conf rename to health/health.d/entropy.conf diff --git a/conf.d/health.d/fping.conf b/health/health.d/fping.conf similarity index 100% rename from conf.d/health.d/fping.conf rename to health/health.d/fping.conf diff --git a/conf.d/health.d/fronius.conf b/health/health.d/fronius.conf similarity index 100% rename from conf.d/health.d/fronius.conf rename to health/health.d/fronius.conf diff --git a/conf.d/health.d/haproxy.conf b/health/health.d/haproxy.conf similarity index 100% rename from conf.d/health.d/haproxy.conf rename to health/health.d/haproxy.conf diff --git a/conf.d/health.d/httpcheck.conf b/health/health.d/httpcheck.conf similarity index 100% rename from conf.d/health.d/httpcheck.conf rename to health/health.d/httpcheck.conf diff --git a/conf.d/health.d/ipc.conf b/health/health.d/ipc.conf similarity index 100% rename from conf.d/health.d/ipc.conf rename to health/health.d/ipc.conf diff --git a/conf.d/health.d/ipfs.conf b/health/health.d/ipfs.conf similarity index 100% rename from conf.d/health.d/ipfs.conf rename to health/health.d/ipfs.conf diff --git a/conf.d/health.d/ipmi.conf b/health/health.d/ipmi.conf similarity index 100% rename from conf.d/health.d/ipmi.conf rename to health/health.d/ipmi.conf diff --git a/conf.d/health.d/isc_dhcpd.conf b/health/health.d/isc_dhcpd.conf similarity index 100% rename from conf.d/health.d/isc_dhcpd.conf rename to health/health.d/isc_dhcpd.conf diff --git a/conf.d/health.d/lighttpd.conf b/health/health.d/lighttpd.conf similarity index 100% rename from conf.d/health.d/lighttpd.conf rename to health/health.d/lighttpd.conf diff --git a/conf.d/health.d/linux_power_supply.conf 
b/health/health.d/linux_power_supply.conf similarity index 100% rename from conf.d/health.d/linux_power_supply.conf rename to health/health.d/linux_power_supply.conf diff --git a/conf.d/health.d/load.conf b/health/health.d/load.conf similarity index 100% rename from conf.d/health.d/load.conf rename to health/health.d/load.conf diff --git a/conf.d/health.d/mdstat.conf b/health/health.d/mdstat.conf similarity index 100% rename from conf.d/health.d/mdstat.conf rename to health/health.d/mdstat.conf diff --git a/conf.d/health.d/megacli.conf b/health/health.d/megacli.conf similarity index 100% rename from conf.d/health.d/megacli.conf rename to health/health.d/megacli.conf diff --git a/conf.d/health.d/memcached.conf b/health/health.d/memcached.conf similarity index 100% rename from conf.d/health.d/memcached.conf rename to health/health.d/memcached.conf diff --git a/conf.d/health.d/memory.conf b/health/health.d/memory.conf similarity index 100% rename from conf.d/health.d/memory.conf rename to health/health.d/memory.conf diff --git a/conf.d/health.d/mongodb.conf b/health/health.d/mongodb.conf similarity index 100% rename from conf.d/health.d/mongodb.conf rename to health/health.d/mongodb.conf diff --git a/conf.d/health.d/mysql.conf b/health/health.d/mysql.conf similarity index 100% rename from conf.d/health.d/mysql.conf rename to health/health.d/mysql.conf diff --git a/conf.d/health.d/named.conf b/health/health.d/named.conf similarity index 100% rename from conf.d/health.d/named.conf rename to health/health.d/named.conf diff --git a/conf.d/health.d/net.conf b/health/health.d/net.conf similarity index 100% rename from conf.d/health.d/net.conf rename to health/health.d/net.conf diff --git a/conf.d/health.d/netfilter.conf b/health/health.d/netfilter.conf similarity index 100% rename from conf.d/health.d/netfilter.conf rename to health/health.d/netfilter.conf diff --git a/conf.d/health.d/nginx.conf b/health/health.d/nginx.conf similarity index 100% rename from 
conf.d/health.d/nginx.conf rename to health/health.d/nginx.conf diff --git a/conf.d/health.d/nginx_plus.conf b/health/health.d/nginx_plus.conf similarity index 100% rename from conf.d/health.d/nginx_plus.conf rename to health/health.d/nginx_plus.conf diff --git a/conf.d/health.d/portcheck.conf b/health/health.d/portcheck.conf similarity index 100% rename from conf.d/health.d/portcheck.conf rename to health/health.d/portcheck.conf diff --git a/conf.d/health.d/postgres.conf b/health/health.d/postgres.conf similarity index 100% rename from conf.d/health.d/postgres.conf rename to health/health.d/postgres.conf diff --git a/conf.d/health.d/qos.conf b/health/health.d/qos.conf similarity index 100% rename from conf.d/health.d/qos.conf rename to health/health.d/qos.conf diff --git a/conf.d/health.d/ram.conf b/health/health.d/ram.conf similarity index 100% rename from conf.d/health.d/ram.conf rename to health/health.d/ram.conf diff --git a/conf.d/health.d/redis.conf b/health/health.d/redis.conf similarity index 100% rename from conf.d/health.d/redis.conf rename to health/health.d/redis.conf diff --git a/conf.d/health.d/retroshare.conf b/health/health.d/retroshare.conf similarity index 100% rename from conf.d/health.d/retroshare.conf rename to health/health.d/retroshare.conf diff --git a/conf.d/health.d/softnet.conf b/health/health.d/softnet.conf similarity index 100% rename from conf.d/health.d/softnet.conf rename to health/health.d/softnet.conf diff --git a/conf.d/health.d/squid.conf b/health/health.d/squid.conf similarity index 100% rename from conf.d/health.d/squid.conf rename to health/health.d/squid.conf diff --git a/conf.d/health.d/stiebeleltron.conf b/health/health.d/stiebeleltron.conf similarity index 100% rename from conf.d/health.d/stiebeleltron.conf rename to health/health.d/stiebeleltron.conf diff --git a/conf.d/health.d/swap.conf b/health/health.d/swap.conf similarity index 100% rename from conf.d/health.d/swap.conf rename to health/health.d/swap.conf diff --git 
a/conf.d/health.d/tcp_conn.conf b/health/health.d/tcp_conn.conf similarity index 100% rename from conf.d/health.d/tcp_conn.conf rename to health/health.d/tcp_conn.conf diff --git a/conf.d/health.d/tcp_listen.conf b/health/health.d/tcp_listen.conf similarity index 100% rename from conf.d/health.d/tcp_listen.conf rename to health/health.d/tcp_listen.conf diff --git a/conf.d/health.d/tcp_mem.conf b/health/health.d/tcp_mem.conf similarity index 100% rename from conf.d/health.d/tcp_mem.conf rename to health/health.d/tcp_mem.conf diff --git a/conf.d/health.d/tcp_orphans.conf b/health/health.d/tcp_orphans.conf similarity index 100% rename from conf.d/health.d/tcp_orphans.conf rename to health/health.d/tcp_orphans.conf diff --git a/conf.d/health.d/tcp_resets.conf b/health/health.d/tcp_resets.conf similarity index 100% rename from conf.d/health.d/tcp_resets.conf rename to health/health.d/tcp_resets.conf diff --git a/conf.d/health.d/udp_errors.conf b/health/health.d/udp_errors.conf similarity index 100% rename from conf.d/health.d/udp_errors.conf rename to health/health.d/udp_errors.conf diff --git a/conf.d/health.d/varnish.conf b/health/health.d/varnish.conf similarity index 100% rename from conf.d/health.d/varnish.conf rename to health/health.d/varnish.conf diff --git a/conf.d/health.d/web_log.conf b/health/health.d/web_log.conf similarity index 100% rename from conf.d/health.d/web_log.conf rename to health/health.d/web_log.conf diff --git a/conf.d/health.d/zfs.conf b/health/health.d/zfs.conf similarity index 100% rename from conf.d/health.d/zfs.conf rename to health/health.d/zfs.conf diff --git a/src/health/health.h b/health/health.h similarity index 98% rename from src/health/health.h rename to health/health.h index cdd1d23623..ff7a4d9bf1 100644 --- a/src/health/health.h +++ b/health/health.h @@ -3,7 +3,7 @@ #ifndef NETDATA_HEALTH_H #define NETDATA_HEALTH_H 1 -#include "src/common.h" +#include "../daemon/common.h" #define NETDATA_PLUGIN_HOOK_HEALTH \ { \ diff --git 
a/conf.d/health_alarm_notify.conf b/health/health_alarm_notify.conf similarity index 100% rename from conf.d/health_alarm_notify.conf rename to health/health_alarm_notify.conf diff --git a/src/health/health_config.c b/health/health_config.c similarity index 100% rename from src/health/health_config.c rename to health/health_config.c diff --git a/conf.d/health_email_recipients.conf b/health/health_email_recipients.conf similarity index 100% rename from conf.d/health_email_recipients.conf rename to health/health_email_recipients.conf diff --git a/src/health/health_json.c b/health/health_json.c similarity index 100% rename from src/health/health_json.c rename to health/health_json.c diff --git a/src/health/health_log.c b/health/health_log.c similarity index 100% rename from src/health/health_log.c rename to health/health_log.c diff --git a/installer/.keep b/installer/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am new file mode 100644 index 0000000000..d2710f0a3e --- /dev/null +++ b/libnetdata/Makefile.am @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + adaptive_resortable_list \ + avl \ + buffer \ + clocks \ + config \ + dictionary \ + eval \ + locks \ + log \ + popen \ + procfile \ + simple_pattern \ + socket \ + statistical \ + storage_number \ + threads \ + url \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/README.md b/libnetdata/README.md new file mode 100644 index 0000000000..545f959849 --- /dev/null +++ b/libnetdata/README.md @@ -0,0 +1,6 @@ +# libnetdata + +`libnetdata` is a collection of library code that is used by all netdata `C` programs. 
+ + + diff --git a/libnetdata/adaptive_resortable_list/Makefile.am b/libnetdata/adaptive_resortable_list/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/adaptive_resortable_list/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md new file mode 100644 index 0000000000..b7148b19d4 --- /dev/null +++ b/libnetdata/adaptive_resortable_list/README.md @@ -0,0 +1,89 @@ + +# Adaptive Re-sortable List (ARL) + +This library allows netdata to read a series of `name - value` pairs +in the **fastest possible way**. + +ARLs are used all over netdata, as they are the most +CPU utilization efficient way to process `/proc` files. They are used to +process both vertical (csv like) and horizontal (one pair per line) `name - value` pairs. + +## How ARL works + +It maintains a linked list of all `NAME` (keywords), sorted in the +order found in the data source. The linked list is kept +sorted at all times - the data source may change at any time, the +linked list will adapt at the next iteration. + +### Initialization + +During initialization (just once), the caller: + +- calls `arl_create()` to create the ARL + +- calls `arl_expect()` multiple times to register the expected keywords + +The library will call the `processor()` function (given to +`arl_create()`), for each expected keyword found. +The default `processor()` expects `dst` to be an `unsigned long long *`. + +Each `name` keyword may have a different `processor()` (by calling +`arl_expect_custom()` instead of `arl_expect()`). + +### Data collection iterations + +For each iteration through the data source, the caller: + +- calls `arl_begin()` to initiate a data collection iteration. 
+ This is to be called just ONCE every time the source is re-evaluated. + +- calls `arl_check()` for each entry read from the file. + +### Cleanup + +When the caller exits: + +- calls `arl_free()` to destroy the ARL and free all memory. + +### Performance + +ARL maintains a list of `name` keywords found in the data source (even the ones +that are not useful for data collection). + +If the data source maintains the same order on the `name-value` pairs, for each +call to `arl_check()` only an `strcmp()` is executed to verify the +expected order has not changed, a counter is incremented and a pointer is changed. +So, if the data source has 100 `name-value` pairs, and their order remains constant +over time, 100 successful `strcmp()` are executed. + +In the unlikely event that an iteration sees the data source with a different order, +for each out-of-order keyword, a full search of the remaining keywords is made. But +this search uses 32bit hashes, not string comparisons, so it should also be fast. + +When all expectations are satisfied (even in the middle of an iteration), +the call to `arl_check()` will return 1, to signal the caller to stop the loop, +saving valuable CPU resources for the rest of the data source. 
+ +In the following test we used alternative methods to process, **1M times**, +a data source like `/proc/meminfo`, already tokenized, in memory, +to extract the same number of expected metrics: + +test|code|string comparison|number parsing|duration +:---:|:---:|:---:|:---:|:---:| +1|if-else-if-else-if|`strcmp()`|`strtoull()`|4698657 usecs +2|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`strtoull()`| 872005 usecs +3|if-else-if-else-if|statement expression `simple_hash()` and `strcmp()`|`strtoull()`|861626 usecs +4|if-continue|inline `simple_hash()` and `strcmp()`|`strtoull()`|871887 usecs +5|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`str2ull()`|606541 usecs +6|ARL|ARL|`strtoull()`|424149 usecs +7|ARL|ARL|`str2ull()`|199324 usecs + +So, compared to unoptimized code (test No 1: 4.7sec), before ARL netdata was using test +No **5** with hashing and a custom `str2ull()` to achieve 607ms. +The current ARL implementation is test No **7** that needs only 199ms +(23 times faster vs unoptimized code, 3 times faster vs optimized code). + +## Limitations + +Do not use ARL if a name/keyword may appear more than once in the +source data. 
diff --git a/src/libnetdata/adaptive_resortable_list.c b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c similarity index 99% rename from src/libnetdata/adaptive_resortable_list.c rename to libnetdata/adaptive_resortable_list/adaptive_resortable_list.c index 71a80ea14b..7f4c6c53d9 100644 --- a/src/libnetdata/adaptive_resortable_list.c +++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // the default processor() of the ARL // can be overwritten at arl_create() diff --git a/src/libnetdata/adaptive_resortable_list.h b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h similarity index 82% rename from src/libnetdata/adaptive_resortable_list.h rename to libnetdata/adaptive_resortable_list/adaptive_resortable_list.h index 409e2c2c97..011ee73d98 100644 --- a/src/libnetdata/adaptive_resortable_list.h +++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h @@ -1,46 +1,10 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" #ifndef NETDATA_ADAPTIVE_RESORTABLE_LIST_H #define NETDATA_ADAPTIVE_RESORTABLE_LIST_H 1 -/* - * ADAPTIVE RE-SORTABLE LIST - * This structure allows netdata to read a file of NAME VALUE lines - * in the fastest possible way. - * - * It maintains a linked list of all NAME (keywords), sorted in the - * same order as found in the source data file. - * The linked list is kept sorted at all times - the source file - * may change at any time, the list will adapt. - * - * The caller: - * - * 1. calls arl_create() to create a list - * - * 2. calls arl_expect() to register the expected keyword - * - * Then: - * - * 3. calls arl_begin() to initiate a data collection iteration. - * This is to be called just ONCE every time the source is re-scanned. - * - * 4. calls arl_check() for each line read from the file. - * - * Finally: - * - * 5. 
calls arl_free() to destroy this and free all memory. - * - * The program will call the processor() function, given to - * arl_create(), for each expected keyword found. - * The default processor() expects dst to be an unsigned long long *. - * - * LIMITATIONS - * DO NOT USE THIS IF THE A NAME/KEYWORD MAY APPEAR MORE THAN - * ONCE IN THE SOURCE DATA SET. - */ - #define ARL_ENTRY_FLAG_FOUND 0x01 // the entry has been found in the source data #define ARL_ENTRY_FLAG_EXPECTED 0x02 // the entry is expected by the program #define ARL_ENTRY_FLAG_DYNAMIC 0x04 // the entry was dynamically allocated, from source data diff --git a/libnetdata/avl/Makefile.am b/libnetdata/avl/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/avl/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md new file mode 100644 index 0000000000..48212a7157 --- /dev/null +++ b/libnetdata/avl/README.md @@ -0,0 +1,11 @@ +# AVL + +AVL is a library indexing objects in B-Trees. + +`avl_insert()`, `avl_remove()` and `avl_search()` are adaptations +of the AVL algorithm found in `libavl` v2.0.3, so that they do not +use any memory allocations and their memory footprint is optimized +(by eliminating non-necessary data members). + +In addition to the above, this version of AVL, provides versions using locks +and traversal functions. 
\ No newline at end of file diff --git a/src/libnetdata/avl.c b/libnetdata/avl/avl.c similarity index 99% rename from src/libnetdata/avl.c rename to libnetdata/avl/avl.c index 41fd1828e4..c44bef307c 100644 --- a/src/libnetdata/avl.c +++ b/libnetdata/avl/avl.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: LGPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" /* ------------------------------------------------------------------------- */ /* diff --git a/src/libnetdata/avl.h b/libnetdata/avl/avl.h similarity index 98% rename from src/libnetdata/avl.h rename to libnetdata/avl/avl.h index 24e879c350..070bb3d3d6 100644 --- a/src/libnetdata/avl.h +++ b/libnetdata/avl/avl.h @@ -3,8 +3,7 @@ #ifndef _AVL_H #define _AVL_H 1 -#include "libnetdata.h" - +#include "../libnetdata.h" /* Maximum AVL tree height. */ #ifndef AVL_MAX_HEIGHT diff --git a/libnetdata/buffer/Makefile.am b/libnetdata/buffer/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/buffer/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md new file mode 100644 index 0000000000..a7cfef89d4 --- /dev/null +++ b/libnetdata/buffer/README.md @@ -0,0 +1,11 @@ +# BUFFER + +`BUFFER` is a convenience library for working with strings in `C`. +Mainly, `BUFFER`s eliminate the need for tracking the string length, thus providing +a safe alternative for string operations. + +Also, they are super fast in printing and appending data to the string and its `buffer_strlen()` +is just a lookup (it does not traverse the string). + +Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or +to backend databases. 
\ No newline at end of file diff --git a/src/libnetdata/web_buffer.c b/libnetdata/buffer/buffer.c similarity index 99% rename from src/libnetdata/web_buffer.c rename to libnetdata/buffer/buffer.c index 5c3f23dbbf..8ba7d9910d 100644 --- a/src/libnetdata/web_buffer.c +++ b/libnetdata/buffer/buffer.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" #define BUFFER_OVERFLOW_EOF "EOF" diff --git a/src/libnetdata/web_buffer.h b/libnetdata/buffer/buffer.h similarity index 99% rename from src/libnetdata/web_buffer.h rename to libnetdata/buffer/buffer.h index 8daed841b9..8e431bfd53 100644 --- a/src/libnetdata/web_buffer.h +++ b/libnetdata/buffer/buffer.h @@ -3,7 +3,7 @@ #ifndef NETDATA_WEB_BUFFER_H #define NETDATA_WEB_BUFFER_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #define WEB_DATA_LENGTH_INCREASE_STEP 1024 diff --git a/libnetdata/clocks/Makefile.am b/libnetdata/clocks/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/clocks/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/clocks/README.md b/libnetdata/clocks/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/clocks.c b/libnetdata/clocks/clocks.c similarity index 99% rename from src/libnetdata/clocks.c rename to libnetdata/clocks/clocks.c index ffff3a92f4..e644aeeb09 100644 --- a/src/libnetdata/clocks.c +++ b/libnetdata/clocks/clocks.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" #ifndef HAVE_CLOCK_GETTIME inline int clock_gettime(clockid_t clk_id, struct timespec *ts) { diff --git a/src/libnetdata/clocks.h b/libnetdata/clocks/clocks.h similarity index 99% rename from src/libnetdata/clocks.h rename to 
libnetdata/clocks/clocks.h index 5b894ac30b..c66dda4369 100644 --- a/src/libnetdata/clocks.h +++ b/libnetdata/clocks/clocks.h @@ -3,7 +3,7 @@ #ifndef NETDATA_CLOCKS_H #define NETDATA_CLOCKS_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #ifndef HAVE_STRUCT_TIMESPEC struct timespec { diff --git a/libnetdata/config/Makefile.am b/libnetdata/config/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/config/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md new file mode 100644 index 0000000000..5e170caa98 --- /dev/null +++ b/libnetdata/config/README.md @@ -0,0 +1,46 @@ +# netdata ini config files + +Configuration files `netdata.conf` and `stream.conf` are netdata ini files. + +## Motivation + +The whole idea came up when we were evaluating the documentation involved +in maintaining a complex configuration system. Our intention was to give +configuration options for everything imaginable. But then, documenting all +these options would require a tremendous amount of time, users would have +to search through endless pages for the option they need, etc. + +We concluded then that **configuring software like that is a waste of time +and effort**. Of course there must be plenty of configuration options, but +the implementation itself should require a lot less effort for both the +developers and the users. + +So, we did this: + +1. No configuration is required to run netdata +2. There are plenty of options to tweak +3. There is minimal documentation (or none at all) + +## Why this works? + +The configuration file is a `name = value` dictionary with `[sections]`. +Write whatever you like there as long as it follows this simple format. 
+ +Netdata loads this dictionary and then when the code needs a value from +it, it just looks up the `name` in the dictionary at the proper `section`. +In all places, in the code, there are both the `names` and their +`default values`, so if something is not found in the configuration +file, the default is used. The lookup is made using B-Trees and hashes +(no string comparisons), so they are super fast. Also the `names` of the +settings can be `my super duper setting that once set to yes, will turn the world upside down = no` +- so goodbye to most of the documentation involved. + +Next, netdata can generate a valid configuration for the user to edit. +No need to remember anything or copy and paste settings. Just get the +configuration from the server (`/netdata.conf` on your netdata server), +edit it and save it. + +Last, what about options you believe you have set, but you misspelled? +When you get the configuration file from the server, there will be a +comment above all `name = value` pairs the server does not use. +So you know that whatever you wrote there, is not used. 
diff --git a/src/libnetdata/appconfig.c b/libnetdata/config/appconfig.c similarity index 99% rename from src/libnetdata/appconfig.c rename to libnetdata/config/appconfig.c index d2442ccbec..079891725b 100644 --- a/src/libnetdata/appconfig.c +++ b/libnetdata/config/appconfig.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" #define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2) diff --git a/src/libnetdata/appconfig.h b/libnetdata/config/appconfig.h similarity index 99% rename from src/libnetdata/appconfig.h rename to libnetdata/config/appconfig.h index 0c40173e2c..bd37171e6c 100644 --- a/src/libnetdata/appconfig.h +++ b/libnetdata/config/appconfig.h @@ -78,7 +78,7 @@ #ifndef NETDATA_CONFIG_H #define NETDATA_CONFIG_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #define CONFIG_FILENAME "netdata.conf" diff --git a/libnetdata/dictionary/Makefile.am b/libnetdata/dictionary/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/dictionary/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/dictionary.c b/libnetdata/dictionary/dictionary.c similarity index 99% rename from src/libnetdata/dictionary.c rename to libnetdata/dictionary/dictionary.c index e0077c4b02..dd94a801dc 100644 --- a/src/libnetdata/dictionary.c +++ b/libnetdata/dictionary/dictionary.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- // dictionary statistics diff --git a/src/libnetdata/dictionary.h 
b/libnetdata/dictionary/dictionary.h similarity index 98% rename from src/libnetdata/dictionary.h rename to libnetdata/dictionary/dictionary.h index 9334c14544..61b9bfc615 100644 --- a/src/libnetdata/dictionary.h +++ b/libnetdata/dictionary/dictionary.h @@ -3,7 +3,7 @@ #ifndef NETDATA_DICTIONARY_H #define NETDATA_DICTIONARY_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" struct dictionary_stats { unsigned long long inserts; diff --git a/libnetdata/eval/Makefile.am b/libnetdata/eval/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/eval/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/eval/README.md b/libnetdata/eval/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/eval.c b/libnetdata/eval/eval.c similarity index 99% rename from src/libnetdata/eval.c rename to libnetdata/eval/eval.c index e0faf14691..0316edac0c 100644 --- a/src/libnetdata/eval.c +++ b/libnetdata/eval/eval.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- // data structures for storing the parsed expression in memory diff --git a/src/libnetdata/eval.h b/libnetdata/eval/eval.h similarity index 98% rename from src/libnetdata/eval.h rename to libnetdata/eval/eval.h index c18b7f8e46..57dae9d0bf 100644 --- a/src/libnetdata/eval.h +++ b/libnetdata/eval/eval.h @@ -3,7 +3,7 @@ #ifndef NETDATA_EVAL_H #define NETDATA_EVAL_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #define EVAL_MAX_VARIABLE_NAME_LENGTH 300 diff --git a/src/libnetdata/inlined.h b/libnetdata/inlined.h similarity index 100% rename from src/libnetdata/inlined.h rename to libnetdata/inlined.h diff --git 
a/src/libnetdata/common.c b/libnetdata/libnetdata.c similarity index 100% rename from src/libnetdata/common.c rename to libnetdata/libnetdata.c diff --git a/src/libnetdata/libnetdata.h b/libnetdata/libnetdata.h similarity index 93% rename from src/libnetdata/libnetdata.h rename to libnetdata/libnetdata.h index 7480eabb56..b72f601bcf 100644 --- a/src/libnetdata/libnetdata.h +++ b/libnetdata/libnetdata.h @@ -205,24 +205,24 @@ #define GUID_LEN 36 #include "os.h" -#include "storage_number.h" -#include "web_buffer.h" -#include "locks.h" -#include "avl.h" +#include "storage_number/storage_number.h" +#include "buffer/buffer.h" +#include "locks/locks.h" +#include "avl/avl.h" #include "inlined.h" -#include "clocks.h" -#include "threads.h" -#include "popen.h" -#include "simple_pattern.h" -#include "socket.h" -#include "appconfig.h" -#include "log.h" -#include "procfile.h" -#include "dictionary.h" -#include "eval.h" -#include "statistical.h" -#include "adaptive_resortable_list.h" -#include "url.h" +#include "clocks/clocks.h" +#include "threads/threads.h" +#include "popen/popen.h" +#include "simple_pattern/simple_pattern.h" +#include "socket/socket.h" +#include "config/appconfig.h" +#include "log/log.h" +#include "procfile/procfile.h" +#include "dictionary/dictionary.h" +#include "eval/eval.h" +#include "statistical/statistical.h" +#include "adaptive_resortable_list/adaptive_resortable_list.h" +#include "url/url.h" extern void netdata_fix_chart_id(char *s); extern void netdata_fix_chart_name(char *s); diff --git a/libnetdata/locks/Makefile.am b/libnetdata/locks/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/locks/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md new file mode 100644 index 0000000000..e69de29bb2 
diff --git a/src/libnetdata/locks.c b/libnetdata/locks/locks.c similarity index 99% rename from src/libnetdata/locks.c rename to libnetdata/locks/locks.c index 0ffdd2c3e2..4e44b9d450 100644 --- a/src/libnetdata/locks.c +++ b/libnetdata/locks/locks.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- // automatic thread cancelability management, based on locks diff --git a/src/libnetdata/locks.h b/libnetdata/locks/locks.h similarity index 99% rename from src/libnetdata/locks.h rename to libnetdata/locks/locks.h index 6f8f011c52..850dd7ebc0 100644 --- a/src/libnetdata/locks.h +++ b/libnetdata/locks/locks.h @@ -3,7 +3,7 @@ #ifndef NETDATA_LOCKS_H #define NETDATA_LOCKS_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" typedef pthread_mutex_t netdata_mutex_t; #define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER diff --git a/libnetdata/log/Makefile.am b/libnetdata/log/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/log/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/log.c b/libnetdata/log/log.c similarity index 99% rename from src/libnetdata/log.c rename to libnetdata/log/log.c index 053dbbc1d6..198e98bd9f 100644 --- a/src/libnetdata/log.c +++ b/libnetdata/log/log.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" int web_server_is_multithreaded = 1; diff --git a/src/libnetdata/log.h b/libnetdata/log/log.h similarity index 99% rename from src/libnetdata/log.h rename to libnetdata/log/log.h index 
ac7baa2340..48e1599a7b 100644 --- a/src/libnetdata/log.h +++ b/libnetdata/log/log.h @@ -3,7 +3,7 @@ #ifndef NETDATA_LOG_H #define NETDATA_LOG_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #define D_WEB_BUFFER 0x0000000000000001 #define D_WEB_CLIENT 0x0000000000000002 diff --git a/src/libnetdata/os.c b/libnetdata/os.c similarity index 100% rename from src/libnetdata/os.c rename to libnetdata/os.c diff --git a/src/libnetdata/os.h b/libnetdata/os.h similarity index 100% rename from src/libnetdata/os.h rename to libnetdata/os.h diff --git a/libnetdata/popen/Makefile.am b/libnetdata/popen/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/popen/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/popen.c b/libnetdata/popen/popen.c similarity index 99% rename from src/libnetdata/popen.c rename to libnetdata/popen/popen.c index 72a7133a2e..845363fd22 100644 --- a/src/libnetdata/popen.c +++ b/libnetdata/popen/popen.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" /* struct mypopen { diff --git a/src/libnetdata/popen.h b/libnetdata/popen/popen.h similarity index 94% rename from src/libnetdata/popen.h rename to libnetdata/popen/popen.h index e6b7994717..90d4b829b0 100644 --- a/src/libnetdata/popen.h +++ b/libnetdata/popen/popen.h @@ -3,7 +3,7 @@ #ifndef NETDATA_POPEN_H #define NETDATA_POPEN_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #define PIPE_READ 0 #define PIPE_WRITE 1 diff --git a/libnetdata/procfile/Makefile.am b/libnetdata/procfile/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ 
b/libnetdata/procfile/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md new file mode 100644 index 0000000000..279885f938 --- /dev/null +++ b/libnetdata/procfile/README.md @@ -0,0 +1,61 @@ + +# PROCFILE + +procfile is a library for reading text data files (i.e `/proc` files) in the fastest possible way. + +## How it works + +The library automatically adapts (through the iterations) its memory so that each file +is read with single `read()` call. + +Then the library splits the file into words, using the supplied separators. +The library also supported quoted words (i.e. strings within of which the separators are ignored). + +### Initialization + +Initially the caller: + +- calls `procfile_open()` to open the file and allocate the structures needed. + +### Iterations + +For each iteration, the caller: + +- calls `procfile_readall()` to read updated contents. + This call also rewinds (`lseek()` to 0) before reading it. + + For every file, a [BUFFER](../buffer/) is used that is automatically adjusted to fit + the entire file contents of the file. So the file is read with a single `read()` call + (providing atomicity / consistency when the data are read from the kernel). + + Once the data are read, 2 arrays of pointers are updated: + + - a `words` array, pointing to each word in the data read + - a `lines` array, pointing to the first word for each line + + This is highly optimized. Both arrays are automatically adjusted to + fit all contents and are updated in a single pass on the data. 
+ + The library provides a number of macros: + + - `procfile_lines()` returns the # of lines read + - `procfile_linewords()` returns the # of words in the given line + - `procfile_word()` returns a pointer to the given word # + - `procfile_line()` returns a pointer to the first word of the given line # + - `procfile_lineword()` returns a pointer to the given word # of the given line # + +### Cleanup + +When the caller exits: + +- calls `procfile_free()` to close the file and free all memory used. + +### Performance + +- a **raspberry Pi 1** (the oldest single core one) can process 5.000+ `/proc` files per second. +- a **J1900 Celeron** processor can process 23.000+ `/proc` files per second per core. + +To achieve this kind of performance, the library tries to work in batches so that the code +and the data are inside the processor's caches. + +This library is extensively used in netdata and its plugins. diff --git a/src/libnetdata/procfile.c b/libnetdata/procfile/procfile.c similarity index 99% rename from src/libnetdata/procfile.c rename to libnetdata/procfile/procfile.c index ff66132ec4..addf271580 100644 --- a/src/libnetdata/procfile.c +++ b/libnetdata/procfile/procfile.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" #define PF_PREFIX "PROCFILE" diff --git a/src/libnetdata/procfile.h b/libnetdata/procfile/procfile.h similarity index 78% rename from src/libnetdata/procfile.h rename to libnetdata/procfile/procfile.h index 4447c5d6f7..b107358abd 100644 --- a/src/libnetdata/procfile.h +++ b/libnetdata/procfile/procfile.h @@ -1,33 +1,9 @@ // SPDX-License-Identifier: GPL-3.0-or-later -/* - * procfile is a library for reading kernel files from /proc - * - * The idea is this: - * - * - every file is opened once with procfile_open(). - * - * - to read updated contents, we rewind it (lseek() to 0) and read again - * with procfile_readall(). 
- * - * - for every file, we use a buffer that is adjusted to fit its entire - * contents in memory, allowing us to read it with a single read() call. - * (this provides atomicity / consistency on the data read from the kernel) - * - * - once the data are read, we update two arrays of pointers: - * - a words array, pointing to each word in the data read - * - a lines array, pointing to the first word for each line - * - * This is highly optimized. Both arrays are automatically adjusted to - * fit all contents and are updated in a single pass on the data: - * - a raspberry Pi can process 5.000+ files / sec. - * - a J1900 celeron processor can process 23.000+ files / sec. -*/ - #ifndef NETDATA_PROCFILE_H #define NETDATA_PROCFILE_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- // An array of words diff --git a/libnetdata/simple_pattern/Makefile.am b/libnetdata/simple_pattern/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/simple_pattern/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md new file mode 100644 index 0000000000..22ccf373a9 --- /dev/null +++ b/libnetdata/simple_pattern/README.md @@ -0,0 +1,36 @@ +## netdata simple patterns + +Unix prefers regular expressions. But they are just too hard, too cryptic +to use, write and understand. + +So, netdata supports **simple patterns**. + +Simple patterns are a space separated list of words, that can have `*` +as a wildcard. Each word may use any number of `*`. Simple patterns +allow **negative** matches by prefixing a word with `!`. + +So, `pattern = !*bad* *` will match anything, except all those that +contain the word `bad`. 
+ +Simple patterns are quite powerful: `pattern = *foobar* !foo* !*bar *` +matches everything containing `foobar`, except strings that start +with `foo` or end with `bar`. + +You can use the netdata command line to check simple patterns, +like this: + +```sh +# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world' +RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world' + +# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world bar' +RESULT: NOT MATCHED - pattern '*foobar* !foo* !*bar *' does not match 'hello world bar' + +# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world foobar' +RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world foobar' +``` + +netdata stops processing at the first positive or negative match +(left to right). If it is not matched by either positive or negative +patterns, it is denied at the end. + diff --git a/src/libnetdata/simple_pattern.c b/libnetdata/simple_pattern/simple_pattern.c similarity index 99% rename from src/libnetdata/simple_pattern.c rename to libnetdata/simple_pattern/simple_pattern.c index 868c042bfd..57b0aecc82 100644 --- a/src/libnetdata/simple_pattern.c +++ b/libnetdata/simple_pattern/simple_pattern.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" struct simple_pattern { const char *match; diff --git a/src/libnetdata/simple_pattern.h b/libnetdata/simple_pattern/simple_pattern.h similarity index 97% rename from src/libnetdata/simple_pattern.h rename to libnetdata/simple_pattern/simple_pattern.h index 5d6dcfd584..b96a018efe 100644 --- a/src/libnetdata/simple_pattern.h +++ b/libnetdata/simple_pattern/simple_pattern.h @@ -3,7 +3,7 @@ #ifndef NETDATA_SIMPLE_PATTERN_H #define NETDATA_SIMPLE_PATTERN_H -#include "libnetdata.h" +#include "../libnetdata.h" typedef enum { diff --git a/libnetdata/socket/Makefile.am b/libnetdata/socket/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a 
--- /dev/null +++ b/libnetdata/socket/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/socket/README.md b/libnetdata/socket/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/socket.c b/libnetdata/socket/socket.c similarity index 99% rename from src/libnetdata/socket.c rename to libnetdata/socket/socket.c index b08528467a..5e65d907a0 100644 --- a/src/libnetdata/socket.c +++ b/libnetdata/socket/socket.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // -------------------------------------------------------------------------------------------------------------------- // various library calls diff --git a/src/libnetdata/socket.h b/libnetdata/socket/socket.h similarity index 99% rename from src/libnetdata/socket.h rename to libnetdata/socket/socket.h index 1f9ed3ec20..8594174ec9 100644 --- a/src/libnetdata/socket.h +++ b/libnetdata/socket/socket.h @@ -3,7 +3,7 @@ #ifndef NETDATA_SOCKET_H #define NETDATA_SOCKET_H -#include "libnetdata.h" +#include "../libnetdata.h" #ifndef MAX_LISTEN_FDS #define MAX_LISTEN_FDS 50 diff --git a/libnetdata/statistical/Makefile.am b/libnetdata/statistical/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/statistical/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/statistical.c b/libnetdata/statistical/statistical.c similarity index 99% rename from src/libnetdata/statistical.c rename to 
libnetdata/statistical/statistical.c index 699a58ce2a..78a0045305 100644 --- a/src/libnetdata/statistical.c +++ b/libnetdata/statistical/statistical.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // -------------------------------------------------------------------------------------------------------------------- diff --git a/src/libnetdata/statistical.h b/libnetdata/statistical/statistical.h similarity index 98% rename from src/libnetdata/statistical.h rename to libnetdata/statistical/statistical.h index f8a426177b..e9fd205ad5 100644 --- a/src/libnetdata/statistical.h +++ b/libnetdata/statistical/statistical.h @@ -3,7 +3,7 @@ #ifndef NETDATA_STATISTICAL_H #define NETDATA_STATISTICAL_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries); extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period); diff --git a/libnetdata/storage_number/Makefile.am b/libnetdata/storage_number/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/storage_number/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/storage_number.c b/libnetdata/storage_number/storage_number.c similarity index 99% rename from src/libnetdata/storage_number.c rename to libnetdata/storage_number/storage_number.c index 98f81418a8..db4cb700b4 100644 --- a/src/libnetdata/storage_number.c +++ b/libnetdata/storage_number/storage_number.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" storage_number 
pack_storage_number(calculated_number value, uint32_t flags) { diff --git a/src/libnetdata/storage_number.h b/libnetdata/storage_number/storage_number.h similarity index 99% rename from src/libnetdata/storage_number.h rename to libnetdata/storage_number/storage_number.h index c68b9f17c5..5353ab60b9 100644 --- a/src/libnetdata/storage_number.h +++ b/libnetdata/storage_number/storage_number.h @@ -3,7 +3,7 @@ #ifndef NETDATA_STORAGE_NUMBER_H #define NETDATA_STORAGE_NUMBER_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" #ifdef NETDATA_WITHOUT_LONG_DOUBLE diff --git a/libnetdata/threads/Makefile.am b/libnetdata/threads/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/threads/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/threads.c b/libnetdata/threads/threads.c similarity index 99% rename from src/libnetdata/threads.c rename to libnetdata/threads/threads.c index da77fc8dbb..133d9a5471 100644 --- a/src/libnetdata/threads.c +++ b/libnetdata/threads/threads.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" static size_t default_stacksize = 0, wanted_stacksize = 0; static pthread_attr_t *attr = NULL; diff --git a/src/libnetdata/threads.h b/libnetdata/threads/threads.h similarity index 98% rename from src/libnetdata/threads.h rename to libnetdata/threads/threads.h index 62e45355f7..eec6ad0e31 100644 --- a/src/libnetdata/threads.h +++ b/libnetdata/threads/threads.h @@ -3,7 +3,7 @@ #ifndef NETDATA_THREADS_H #define NETDATA_THREADS_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" extern pid_t gettid(void); diff --git a/libnetdata/url/Makefile.am 
b/libnetdata/url/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/libnetdata/url/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/libnetdata/url.c b/libnetdata/url/url.c similarity index 98% rename from src/libnetdata/url.c rename to libnetdata/url/url.c index f62acec851..8a96063a03 100644 --- a/src/libnetdata/url.c +++ b/libnetdata/url/url.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- // URL encode / decode diff --git a/src/libnetdata/url.h b/libnetdata/url/url.h similarity index 96% rename from src/libnetdata/url.h rename to libnetdata/url/url.h index 5cead4ae93..6cef6d7a84 100644 --- a/src/libnetdata/url.h +++ b/libnetdata/url/url.h @@ -3,7 +3,7 @@ #ifndef NETDATA_URL_H #define NETDATA_URL_H 1 -#include "libnetdata.h" +#include "../libnetdata.h" // ---------------------------------------------------------------------------- // URL encode / decode diff --git a/netdata-installer.sh b/netdata-installer.sh index 448a0422a1..6e982b4c14 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -173,49 +173,6 @@ For the plugins, you will at least need: USAGE } -# shellcheck disable=SC2230 -md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" -get_git_config_signatures() { - local x s file md5 - - [ ! -d "conf.d" ] && echo >&2 "Wrong directory." && return 1 - [ -z "${md5sum}" -o ! -x "${md5sum}" ] && echo >&2 "No md5sum command." 
&& return 1 - - echo >configs.signatures.tmp - - for x in $(find conf.d -name \*.conf) - do - x="${x/conf.d\//}" - echo "${x}" - for c in $(git log --follow "conf.d/${x}" | grep ^commit | cut -d ' ' -f 2) - do - git checkout ${c} "conf.d/${x}" || continue - s="$(cat "conf.d/${x}" | ${md5sum} | cut -d ' ' -f 1)" - echo >>configs.signatures.tmp "${s}:${x}" - echo " ${s}" - done - git checkout HEAD "conf.d/${x}" || break - done - - cat configs.signatures.tmp |\ - grep -v "^$" |\ - sort -u |\ - { - echo "declare -A configs_signatures=(" - IFS=":" - while read md5 file - do - echo " ['${md5}']='${file}'" - done - echo ")" - } >configs.signatures - - rm configs.signatures.tmp - - return 0 -} - - while [ ! -z "${1}" ] do if [ "$1" = "--install" ] @@ -270,10 +227,6 @@ do then usage exit 1 - elif [ "$1" = "get_git_config_signatures" ] - then - get_git_config_signatures && exit 0 - exit 1 else echo >&2 echo >&2 "ERROR:" @@ -546,6 +499,10 @@ if [ -d "${NETDATA_PREFIX}/etc/netdata" ] fi # ----------------------------------------------------------------------------- + +# shellcheck disable=SC2230 +md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)" + deleted_stock_configs=0 if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ] then @@ -962,7 +919,7 @@ fi # ----------------------------------------------------------------------------- progress "Check version.txt" -if [ ! -s web/version.txt ] +if [ ! 
-s webserver/gui/version.txt ] then cat <<VERMSG diff --git a/node.d/Makefile.am b/node.d/Makefile.am deleted file mode 100644 index 157d922a8f..0000000000 --- a/node.d/Makefile.am +++ /dev/null @@ -1,29 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -dist_node_DATA = \ - README.md \ - named.node.js \ - fronius.node.js \ - sma_webbox.node.js \ - snmp.node.js \ - stiebeleltron.node.js \ - $(NULL) - -nodemodulesdir=$(nodedir)/node_modules -dist_nodemodules_DATA = \ - node_modules/netdata.js \ - node_modules/extend.js \ - node_modules/pixl-xml.js \ - node_modules/net-snmp.js \ - node_modules/asn1-ber.js \ - $(NULL) - -nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber -dist_nodemoduleslibber_DATA = \ - node_modules/lib/ber/index.js \ - node_modules/lib/ber/errors.js \ - node_modules/lib/ber/reader.js \ - node_modules/lib/ber/types.js \ - node_modules/lib/ber/writer.js \ - $(NULL) diff --git a/node.d/README.md b/node.d/README.md deleted file mode 100644 index 7902fd967b..0000000000 --- a/node.d/README.md +++ /dev/null @@ -1,118 +0,0 @@ -# Disclaimer - -Module configurations are written in JSON and **node.js is required**. - -to be edited. - ---- - -The following node.d modules are supported: - -# fronius - -This module collects metrics from the configured solar power installation from Fronius Symo. -See `netdata/conf.d/node.d/fronius.conf.md` for more details. - -**Requirements** - * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`) - * Fronius Symo with network access (http) - -It produces per server: - -1. **Power** - * Current power input from the grid (positive values), output to the grid (negative values), in W - * Current power input from the solar panels, in W - * Current power stored in the accumulator (if present), in W (in theory, untested) - -2. **Consumption** - * Local consumption in W - -3. **Autonomy** - * Relative autonomy in %. 
100 % autonomy means that the solar panels are delivering more power than it is needed by local consumption. - * Relative self consumption in %. The lower the better - -4. **Energy** - * The energy produced during the current day, in kWh - * The energy produced during the current year, in kWh - -5. **Inverter** - * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present. - - -### configuration - -Sample: - -```json -{ - "enable_autodetect": false, - "update_every": 5, - "servers": [ - { - "name": "Symo", - "hostname": "symo.ip.or.dns", - "update_every": 5, - "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi" - } - ] -} -``` - -If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `5`. - ---- - -# stiebel eltron - -This module collects metrics from the configured heat pump and hot water installation from Stiebel Eltron ISG web. -See `netdata/conf.d/node.d/stiebeleltron.conf.md` for more details. - -**Requirements** - * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`) - * Stiebel Eltron ISG web with network access (http), without password login - -The charts are configurable, however, the provided default configuration collects the following: - -1. **General** - * Outside temperature in C - * Condenser temperature in C - * Heating circuit pressure in bar - * Flow rate in l/min - * Output of water and heat pumps in % - -2. **Heating** - * Heat circuit 1 temperature in C (set/actual) - * Heat circuit 2 temperature in C (set/actual) - * Flow temperature in C (set/actual) - * Buffer temperature in C (set/actual) - * Pre-flow temperature in C - -3. **Hot Water** - * Hot water temperature in C (set/actual) - -4. **Room Temperature** - * Heat circuit 1 room temperature in C (set/actual) - * Heat circuit 2 room temperature in C (set/actual) - -5. 
**Eletric Reheating** - * Dual Mode Reheating temperature in C (hot water/heating) - -6. **Process Data** - * Remaining compressor rest time in s - -7. **Runtime** - * Compressor runtime hours (hot water/heating) - * Reheating runtime hours (reheating 1/reheating 2) - -8. **Energy** - * Compressor today in kWh (hot water/heating) - * Compressor Total in kWh (hot water/heating) - - -### configuration - -The default configuration is provided in [netdata/conf.d/node.d/stiebeleltron.conf.md](https://github.com/netdata/netdata/blob/master/conf.d/node.d/stiebeleltron.conf.md). Just change the `update_every` (if necessary) and hostnames. **You may have to adapt the configuration to suit your needs and setup** (which might be different). - -If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `10`. - ---- diff --git a/plugins.d/Makefile.am b/plugins.d/Makefile.am deleted file mode 100644 index 75944a4f87..0000000000 --- a/plugins.d/Makefile.am +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> -# SPDX-License-Identifier: GPL-3.0-or-later -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in -CLEANFILES = \ - alarm-notify.sh \ - charts.d.plugin \ - fping.plugin \ - node.d.plugin \ - python.d.plugin \ - tc-qos-helper.sh \ - $(NULL) - -include $(top_srcdir)/build/subst.inc - -SUFFIXES = .in - -dist_plugins_DATA = \ - README.md \ - $(NULL) - -dist_plugins_SCRIPTS = \ - alarm-email.sh \ - alarm-notify.sh \ - alarm-test.sh \ - charts.d.dryrun-helper.sh \ - charts.d.plugin \ - fping.plugin \ - node.d.plugin \ - python.d.plugin \ - tc-qos-helper.sh \ - loopsleepms.sh.inc \ - $(NULL) - -dist_noinst_DATA = \ - alarm-notify.sh.in \ - charts.d.plugin.in \ - fping.plugin.in \ - node.d.plugin.in \ - python.d.plugin.in \ - tc-qos-helper.sh.in \ - $(NULL) diff --git a/plugins.d/README.md b/plugins.d/README.md deleted file mode 100644 index d2e2ed4c2b..0000000000 --- a/plugins.d/README.md +++ 
/dev/null @@ -1,236 +0,0 @@ -netdata plugins -=============== - -Any program that can print a few values to its standard output can become -a netdata plugin. - -There are 5 lines netdata parses. lines starting with: - -- `CHART` - create a new chart -- `DIMENSION` - add a dimension to the chart just created -- `BEGIN` - initialize data collection for a chart -- `SET` - set the value of a dimension for the initialized chart -- `END` - complete data collection for the initialized chart - -a single program can produce any number of charts with any number of dimensions -each. - -charts can also be added any time (not just the beginning). - -### command line parameters - -The plugin should accept just **one** parameter: **the number of seconds it is -expected to update the values for its charts**. The value passed by netdata -to the plugin is controlled via its configuration file (so there is not need -for the plugin to handle this configuration option). - -The script can overwrite the update frequency. For example, the server may -request per second updates, but the script may overwrite this to one update -every 5 seconds. - -### environment variables - -There are a few environment variables that are set by `netdata` and are -available for the plugin to use. - -variable|description -:------:|:---------- -`NETDATA_CONFIG_DIR`|The directory where all netdata related configuration should be stored. If the plugin requires custom configuration, this is the place to save it. -`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored. -`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved. -`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. -`NETDATA_LOG_DIR`|The directory where the log files are stored. 
By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata. -`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. -`NETDATA_DEBUG_FLAGS`|This is number (probably in hex starting with `0x`), that enables certain netdata debugging features. -`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds. - - -# the output of the plugin - -The plugin should output instructions for netdata to its output (`stdout`). - -## CHART - -`CHART` defines a new chart. - -the template is: - -> CHART type.id name title units [family [category [charttype [priority [update_every]]]]] - - where: - - `type.id` - - uniquely identifies the chart, - this is what will be needed to add values to the chart - - - `name` - - is the name that will be presented to the used for this chart - - - `title` - - the text above the chart - - - `units` - - the label of the vertical axis of the chart, - all dimensions added to a chart should have the same units - of measurement - - - `family` - - is used to group charts together - (for example all eth0 charts should say: eth0), - if empty or missing, the `id` part of `type.id` will be used - - - `category` - - the section under which the chart will appear - (for example mem.ram should appear in the 'system' section), - the special word 'none' means: do not show this chart on the home page, - if empty or missing, the `type` part of `type.id` will be used - - - `charttype` - - one of `line`, `area` or `stacked`, - if empty or missing, the `line` will be used - - - `priority` - - is the relative priority of the charts as rendered on the web page, - lower numbers make the charts appear before the ones with higher numbers, - if empty or 
missing, `1000` will be used - - - `update_every` - - overwrite the update frequency set by the server, - if empty or missing, the user configured value will be used - - -## DIMENSION - -`DIMENSION` defines a new dimension for the chart - -the template is: - -> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]] - - where: - - - `id` - - the `id` of this dimension (it is a text value, not numeric), - this will be needed later to add values to the dimension - - - `name` - - the name of the dimension as it will appear at the legend of the chart, - if empty or missing the `id` will be used - - - `algorithm` - - one of: - - * `absolute` - - the value is to drawn as-is (interpolated to second boundary), - if `algorithm` is empty, invalid or missing, `absolute` is used - - * `incremental` - - the value increases over time, - the difference from the last value is presented in the chart, - the server interpolates the value and calculates a per second figure - - * `percentage-of-absolute-row` - - the % of this value compared to the total of all dimensions - - * `percentage-of-incremental-row` - - the % of this value compared to the incremental total of - all dimensions - - - `multiplier` - - an integer value to multiply the collected value, - if empty or missing, `1` is used - - - `divisor` - - an integer value to divide the collected value, - if empty or missing, `1` is used - - - `hidden` - - giving the keyword `hidden` will make this dimension hidden, - it will take part in the calculations but will not be presented in the chart - - -## data collection - -data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines - -> BEGIN type.id [microseconds] - - - `type.id` - - is the unique identification of the chart (as given in `CHART`) - - - `microseconds` - - is the number of microseconds since the last update of the chart, - it is optional. 
- - Under heavy system load, the system may have some latency transferring - data from the plugins to netdata via the pipe. This number improves - accuracy significantly, since the plugin is able to calculate the - duration between its iterations better than netdata. - - The first time the plugin is started, no microseconds should be given - to netdata. - -> SET id = value - - - `id` - - is the unique identification of the dimension (of the chart just began) - - - `value` - - is the collected value - -> END - - END does not take any parameters, it commits the collected values to the chart. - -More `SET` lines may appear to update all the dimensions of the chart. -All of them in one `BEGIN` -> `END` block. - -All `SET` lines within a single `BEGIN` -> `END` block have to refer to the -same chart. - -If more charts need to be updated, each chart should have its own -`BEGIN` -> `SET` -> `END` block. - -If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it, -it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore -the last `BEGIN` command. - -If a plugin does not behave properly (outputs invalid lines, or does not -follow these guidelines), will be disabled by netdata. - - -### collected values - -netdata will collect any **signed** value in the 64bit range: -`-9.223.372.036.854.775.808` to `+9.223.372.036.854.775.807` - -Internally, all calculations are made using 128 bit double precision and are -stored in 30 bits as floating point. - -If a value is not collected, leave it empty, like this: - -`SET id = ` - -or do not output the line at all. diff --git a/python.d/README.md b/python.d/README.md deleted file mode 100644 index 3ce63cf30c..0000000000 --- a/python.d/README.md +++ /dev/null @@ -1,2889 +0,0 @@ -# Disclaimer - -Every module should be compatible with python2 and python3. -All third party libraries should be installed system-wide or in `python_modules` directory. 
-Module configurations are written in YAML and **pyYAML is required**. - -Every configuration file must have one of two formats: - -- Configuration for only one job: - -```yaml -update_every : 2 # update frequency -retries : 1 # how many failures in update() is tolerated -priority : 20000 # where it is shown on dashboard - -other_var1 : bla # variables passed to module -other_var2 : alb -``` - -- Configuration for many jobs (ex. mysql): - -```yaml -# module defaults: -update_every : 2 -retries : 1 -priority : 20000 - -local: # job name - update_every : 5 # job update frequency - other_var1 : some_val # module specific variable - -other_job: - priority : 5 # job position on dashboard - retries : 20 # job retries - other_var2 : val # module specific variable -``` - -`update_every`, `retries`, and `priority` are always optional. - ---- - -The following python.d modules are supported: - -# apache - -This module will monitor one or more Apache servers depending on configuration. - -**Requirements:** - * apache with enabled `mod_status` - -It produces the following charts: - -1. **Requests** in requests/s - * requests - -2. **Connections** - * connections - -3. **Async Connections** - * keepalive - * closing - * writing - -4. **Bandwidth** in kilobytes/s - * sent - -5. **Workers** - * idle - * busy - -6. **Lifetime Avg. Requests/s** in requests/s - * requests_sec - -7. **Lifetime Avg. Bandwidth/s** in kilobytes/s - * size_sec - -8. **Lifetime Avg. 
Response Size** in bytes/request - * size_req - -### configuration - -Needs only `url` to server's `server-status?auto` - -Here is an example for 2 servers: - -```yaml -update_every : 10 -priority : 90100 - -local: - url : 'http://localhost/server-status?auto' - retries : 20 - -remote: - url : 'http://www.apache.org/server-status?auto' - update_every : 5 - retries : 4 -``` - -Without configuration, module attempts to connect to `http://localhost/server-status?auto` - ---- - -# apache_cache - -Module monitors apache mod_cache log and produces only one chart: - -**cached responses** in percent cached - * hit - * miss - * other - -### configuration - -Sample: - -```yaml -update_every : 10 -priority : 120000 -retries : 5 -log_path : '/var/log/apache2/cache.log' -``` - -If no configuration is given, module will attempt to read log file at `/var/log/apache2/cache.log` - ---- - -# beanstalk - -Module provides server and tube-level statistics: - -**Requirements:** - * `python-beanstalkc` - -**Server statistics:** - -1. **Cpu usage** in cpu time - * user - * system - -2. **Jobs rate** in jobs/s - * total - * timeouts - -3. **Connections rate** in connections/s - * connections - -4. **Commands rate** in commands/s - * put - * peek - * peek-ready - * peek-delayed - * peek-buried - * reserve - * use - * watch - * ignore - * delete - * release - * bury - * kick - * stats - * stats-job - * stats-tube - * list-tubes - * list-tube-used - * list-tubes-watched - * pause-tube - -5. **Current tubes** in tubes - * tubes - -6. **Current jobs** in jobs - * urgent - * ready - * reserved - * delayed - * buried - -7. **Current connections** in connections - * written - * producers - * workers - * waiting - -8. **Binlog** in records/s - * written - * migrated - -9. **Uptime** in seconds - * uptime - -**Per tube statistics:** - -1. **Jobs rate** in jobs/s - * jobs - -2. **Jobs** in jobs - * using - * ready - * reserved - * delayed - * buried - -3. 
**Connections** in connections - * using - * waiting - * watching - -4. **Commands** in commands/s - * deletes - * pauses - -5. **Pause** in seconds - * since - * left - - -### configuration - -Sample: - -```yaml -host : '127.0.0.1' -port : 11300 -``` - -If no configuration is given, module will attempt to connect to beanstalkd on `127.0.0.1:11300` address - ---- - -# bind_rndc - -Module parses bind dump file to collect real-time performance metrics - -**Requirements:** - * Version of bind must be 9.6 + - * Netdata must have permissions to run `rndc stats` - -It produces: - -1. **Name server statistics** - * requests - * responses - * success - * auth_answer - * nonauth_answer - * nxrrset - * failure - * nxdomain - * recursion - * duplicate - * rejections - -2. **Incoming queries** - * RESERVED0 - * A - * NS - * CNAME - * SOA - * PTR - * MX - * TXT - * X25 - * AAAA - * SRV - * NAPTR - * A6 - * DS - * RSIG - * DNSKEY - * SPF - * ANY - * DLV - -3. **Outgoing queries** - * Same as Incoming queries - - -### configuration - -Sample: - -```yaml -local: - named_stats_path : '/var/log/bind/named.stats' -``` - -If no configuration is given, module will attempt to read named.stats file at `/var/log/bind/named.stats` - ---- - -# boinc - -This module monitors task counts for the Berkely Open Infrastructure -Networking Computing (BOINC) distributed computing client using the same -RPC interface that the BOINC monitoring GUI does. - -It provides charts tracking the total number of tasks and active tasks, -as well as ones tracking each of the possible states for tasks. - -### configuration - -BOINC requires use of a password to access it's RPC interface. You can -find this password in the `gui_rpc_auth.cfg` file in your BOINC directory. 
- -By default, the module will try to auto-detect the password by looking -in `/var/lib/boinc` for this file (this is the location most Linux -distributions use for a system-wide BOINC installation), so things may -just work without needing configuration for the local system. - -You can monitor remote systems as well: - -```yaml -remote: - hostname: some-host - password: some-password -``` - ---- - -# chrony - -This module monitors the precision and statistics of a local chronyd server. - -It produces: - -* frequency -* last offset -* RMS offset -* residual freq -* root delay -* root dispersion -* skew -* system time - -**Requirements:** -Verify that user netdata can execute `chronyc tracking`. If necessary, update `/etc/chrony.conf`, `cmdallow`. - -### Configuration - -Sample: -```yaml -# data collection frequency: -update_every: 1 - -# chrony query command: -local: - command: 'chronyc -n tracking' -``` - ---- - -# ceph - -This module monitors the ceph cluster usage and consuption data of a server. - -It produces: - -* Cluster statistics (usage, available, latency, objects, read/write rate) -* OSD usage -* OSD latency -* Pool usage -* Pool read/write operations -* Pool read/write rate -* number of objects per pool - -**Requirements:** - -- `rados` python module -- Granting read permissions to ceph group from keyring file -```shell -# chmod 640 /etc/ceph/ceph.client.admin.keyring -``` - -### Configuration - -Sample: -```yaml -local: - config_file: '/etc/ceph/ceph.conf' - keyring_file: '/etc/ceph/ceph.client.admin.keyring' -``` - ---- - -# couchdb - -This module monitors vital statistics of a local Apache CouchDB 2.x server, including: - -* Overall server reads/writes -* HTTP traffic breakdown - * Request methods (`GET`, `PUT`, `POST`, etc.) - * Response status codes (`200`, `201`, `4xx`, etc.) 
-* Active server tasks -* Replication status (CouchDB 2.1 and up only) -* Erlang VM stats -* Optional per-database statistics: sizes, # of docs, # of deleted docs - -### Configuration - -Sample for a local server running on port 5984: -```yaml -local: - user: 'admin' - pass: 'password' - node: 'couchdb@127.0.0.1' -``` - -Be sure to specify a correct admin-level username and password. - -You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server. - -If you want per-database statistics, these need to be added to the configuration, separated by spaces: -```yaml -local: - ... - databases: 'db1 db2 db3 ...' -``` - ---- - -# cpufreq - -This module shows the current CPU frequency as set by the cpufreq kernel -module. - -**Requirement:** -You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT` -enabled in your kernel. - -This module tries to read from one of two possible locations. On -initialization, it tries to read the `time_in_state` files provided by -cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it -falls back to using the more inaccurate `scaling_cur_freq` file (which only -represents the **current** CPU frequency, and doesn't account for any state -changes which happen between updates). - -It produces one chart with multiple lines (one line per core). - -### configuration - -Sample: - -```yaml -sys_dir: "/sys/devices" -``` - -If no configuration is given, module will search for cpufreq files in `/sys/devices` directory. -Directory is also prefixed with `NETDATA_HOST_PREFIX` if specified. - ---- - -# cpuidle - -This module monitors the usage of CPU idle states. - -**Requirement:** -Your kernel needs to have `CONFIG_CPU_IDLE` enabled. 
- -It produces one stacked chart per CPU, showing the percentage of time spent in -each state. - ---- -# dns_query_time - -This module provides DNS query time statistics. - -**Requirement:** -* `python-dnspython` package - -It produces one aggregate chart or one chart per DNS server, showing the query time. - ---- - -# dnsdist - -Module monitor dnsdist performance and health metrics. - -Following charts are drawn: - -1. **Response latency** - * latency-slow - * latency100-1000 - * latency50-100 - * latency10-50 - * latency1-10 - * latency0-1 - -2. **Cache performance** - * cache-hits - * cache-misses - -3. **ACL events** - * acl-drops - * rule-drop - * rule-nxdomain - * rule-refused - -4. **Noncompliant data** - * empty-queries - * no-policy - * noncompliant-queries - * noncompliant-responses - -5. **Queries** - * queries - * rdqueries - * rdqueries - -6. **Health** - * downstream-send-errors - * downstream-timeouts - * servfail-responses - * trunc-failures - -### configuration - -```yaml -localhost: - name : 'local' - url : 'http://127.0.0.1:5053/jsonstat?command=stats' - user : 'username' - pass : 'password' - header: - X-API-Key: 'dnsdist-api-key' -``` - ---- - -# docker - -Module monitor docker health metrics. - -**Requirement:** -* `docker` package - -Following charts are drawn: - -1. **running containers** - * count - -2. **healthy containers** - * count - -3. **unhealthy containers** - * count - -### configuration - -```yaml - update_every : 1 - priority : 60000 - ``` - ---- - -# dovecot - -This module provides statistics information from Dovecot server. -Statistics are taken from dovecot socket by executing `EXPORT global` command. -More information about dovecot stats can be found on [project wiki page.](http://wiki2.dovecot.org/Statistics) - -**Requirement:** -Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket. - -Module gives information with following charts: - -1. **sessions** - * active sessions - -2. 
**logins** - * logins - -3. **commands** - number of IMAP commands - * commands - -4. **Faults** - * minor - * major - -5. **Context Switches** - * volountary - * involountary - -6. **disk** in bytes/s - * read - * write - -7. **bytes** in bytes/s - * read - * write - -8. **number of syscalls** in syscalls/s - * read - * write - -9. **lookups** - number of lookups per second - * path - * attr - -10. **hits** - number of cache hits - * hits - -11. **attempts** - authorization attempts - * success - * failure - -12. **cache** - cached authorization hits - * hit - * miss - -### configuration - -Sample: - -```yaml -localtcpip: - name : 'local' - host : '127.0.0.1' - port : 24242 - -localsocket: - name : 'local' - socket : '/var/run/dovecot/stats' -``` - -If no configuration is given, module will attempt to connect to dovecot using unix socket localized in `/var/run/dovecot/stats` - ---- - -# elasticsearch - -This module monitors Elasticsearch performance and health metrics. - -It produces: - -1. **Search performance** charts: - * Number of queries, fetches - * Time spent on queries, fetches - * Query and fetch latency - -2. **Indexing performance** charts: - * Number of documents indexed, index refreshes, flushes - * Time spent on indexing, refreshing, flushing - * Indexing and flushing latency - -3. **Memory usage and garbace collection** charts: - * JVM heap currently in use, committed - * Count of garbage collections - * Time spent on garbage collections - -4. **Host metrics** charts: - * Available file descriptors in percent - * Opened HTTP connections - * Cluster communication transport metrics - -5. **Queues and rejections** charts: - * Number of queued/rejected threads in thread pool - -6. **Fielddata cache** charts: - * Fielddata cache size - * Fielddata evictions and circuit breaker tripped count - -7. **Cluster health API** charts: - * Cluster status - * Nodes and tasks statistics - * Shards statistics - -8. 
**Cluster stats API** charts: - * Nodes statistics - * Query cache statistics - * Docs statistics - * Store statistics - * Indices and shards statistics - -### configuration - -Sample: - -```yaml -local: - host : 'ipaddress' # Server ip address or hostname - port : 'password' # Port on which elasticsearch listed - cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default. - cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default. -``` - -If no configuration is given, module will fail to run. - ---- - -# exim - -Simple module executing `exim -bpc` to grab exim queue. -This command can take a lot of time to finish its execution thus it is not recommended to run it every second. - -It produces only one chart: - -1. **Exim Queue Emails** - * emails - -Configuration is not needed. - ---- - -# fail2ban - -Module monitor fail2ban log file to show all bans for all active jails - -**Requirements:** - * fail2ban.log file MUST BE readable by netdata (A good idea is to add **create 0640 root netdata** to fail2ban conf at logrotate.d) - -It produces one chart with multiple lines (one line per jail) - -### configuration - -Sample: - -```yaml -local: - log_path: '/var/log/fail2ban.log' - conf_path: '/etc/fail2ban/jail.local' - exclude: 'dropbear apache' -``` -If no configuration is given, module will attempt to read log file at `/var/log/fail2ban.log` and conf file at `/etc/fail2ban/jail.local`. -If conf file is not found default jail is `ssh`. - ---- - -# freeradius - -Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second. - -It produces: - -1. **Authentication counters:** - * access-accepts - * access-rejects - * auth-dropped-requests - * auth-duplicate-requests - * auth-invalid-requests - * auth-malformed-requests - * auth-unknown-types - -2. 
**Accounting counters:** [optional] - * accounting-requests - * accounting-responses - * acct-dropped-requests - * acct-duplicate-requests - * acct-invalid-requests - * acct-malformed-requests - * acct-unknown-types - -3. **Proxy authentication counters:** [optional] - * proxy-access-accepts - * proxy-access-rejects - * proxy-auth-dropped-requests - * proxy-auth-duplicate-requests - * proxy-auth-invalid-requests - * proxy-auth-malformed-requests - * proxy-auth-unknown-types - -4. **Proxy accounting counters:** [optional] - * proxy-accounting-requests - * proxy-accounting-responses - * proxy-acct-dropped-requests - * proxy-acct-duplicate-requests - * proxy-acct-invalid-requests - * proxy-acct-malformed-requests - * proxy-acct-unknown-typesa - - -### configuration - -Sample: - -```yaml -local: - host : 'localhost' - port : '18121' - secret : 'adminsecret' - acct : False # Freeradius accounting statistics. - proxy_auth : False # Freeradius proxy authentication statistics. - proxy_acct : False # Freeradius proxy accounting statistics. -``` - -**Freeradius server configuration:** - -The configuration for the status server is automatically created in the sites-available directory. -By default, server is enabled and can be queried from every client. -FreeRADIUS will only respond to status-server messages, if the status-server virtual server has been enabled. - -To do this, create a link from the sites-enabled directory to the status file in the sites-available directory: - * cd sites-enabled - * ln -s ../sites-available/status status - -and restart/reload your FREERADIUS server. - ---- - -# go_expvar - ---- - -The `go_expvar` module can monitor any Go application that exposes its metrics with the use of `expvar` package from the Go standard library. - -`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications) for more info. 
- -For the memory statistics, it produces the following charts: - -1. **Heap allocations** in kB - * alloc: size of objects allocated on the heap - * inuse: size of allocated heap spans - -2. **Stack allocations** in kB - * inuse: size of allocated stack spans - -3. **MSpan allocations** in kB - * inuse: size of allocated mspan structures - -4. **MCache allocations** in kB - * inuse: size of allocated mcache structures - -5. **Virtual memory** in kB - * sys: size of reserved virtual address space - -6. **Live objects** - * live: number of live objects in memory - -7. **GC pauses average** in ns - * avg: average duration of all GC stop-the-world pauses - -### configuration - -Please see the [wiki page](https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration. - ---- - -# haproxy - -Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current. -And health metrics such as backend servers status (server check should be used). - -Plugin can obtain data from url **OR** unix socket. - -**Requirement:** -Socket MUST be readable AND writable by netdata user. - -It produces: - -1. **Frontend** family charts - * Kilobytes in/s - * Kilobytes out/s - * Sessions current - * Sessions in queue current - -2. **Backend** family charts - * Kilobytes in/s - * Kilobytes out/s - * Sessions current - * Sessions in queue current - -3. **Health** chart - * number of failed servers for every backend (in DOWN state) - - -### configuration - -Sample: - -```yaml -via_url: - user : 'username' # ONLY IF stats auth is used - pass : 'password' # # ONLY IF stats auth is used - url : 'http://ip.address:port/url;csv;norefresh' -``` - -OR - -```yaml -via_socket: - socket : 'path/to/haproxy/sock' -``` - -If no configuration is given, module will fail to run. - ---- - -# hddtemp - -Module monitors disk temperatures from one or more hddtemp daemons. 
- -**Requirement:** -Running `hddtemp` in daemonized mode with access on tcp port - -It produces one chart **Temperature** with dynamic number of dimensions (one per disk) - -### configuration - -Sample: - -```yaml -update_every: 3 -host: "127.0.0.1" -port: 7634 -``` - -If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address - ---- - -# httpcheck - -Module monitors remote http server for availability and response time. - -Following charts are drawn per job: - -1. **Response time** ms - * Time in 0.1 ms resolution in which the server responds. - If the connection failed, the value is missing. - -2. **Status** boolean - * Connection successful - * Unexpected content: No Regex match found in the response - * Unexpected status code: Do we get 500 errors? - * Connection failed: port not listening or blocked - * Connection timed out: host or port unreachable - -### configuration - -Sample configuration and their default values. - -```yaml -server: - url: 'http://host:port/path' # required - status_accepted: # optional - - 200 - timeout: 1 # optional, supports decimals (e.g. 0.2) - update_every: 3 # optional - regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html - redirect: yes # optional -``` - -### notes - - * The status chart is primarily intended for alarms, badges or for access via API. - * A system/service/firewall might block netdata's access if a portscan or - similar is detected. - * This plugin is meant for simple use cases. Currently, the accuracy of the - response time is low and should be used as reference only. - ---- - -# icecast - -This module will monitor number of listeners for active sources. - -**Requirements:** - * icecast version >= 2.4.0 - -It produces the following charts: - -1. 
**Listeners** in listeners - * source number - -### configuration - -Needs only `url` to server's `/status-json.xsl` - -Here is an example for remote server: - -```yaml -remote: - url : 'http://1.2.3.4:8443/status-json.xsl' -``` - -Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl` - ---- - -# IPFS - -Module monitors [IPFS](https://ipfs.io) basic information. - -1. **Bandwidth** in kbits/s - * in - * out - -2. **Peers** - * peers - -### configuration - -Only url to IPFS server is needed. - -Sample: - -```yaml -localhost: - name : 'local' - url : 'http://localhost:5001' -``` - ---- - -# isc_dhcpd - -Module monitor leases database to show all active leases for given pools. - -**Requirements:** - * dhcpd leases file MUST BE readable by netdata - * pools MUST BE in CIDR format - -It produces: - -1. **Pools utilization** Aggregate chart for all pools. - * utilization in percent - -2. **Total leases** - * leases (overall number of leases for all pools) - -3. **Active leases** for every pools - * leases (number of active leases in pool) - - -### configuration - -Sample: - -```yaml -local: - leases_path : '/var/lib/dhcp/dhcpd.leases' - pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24' -``` - -In case of python2 you need to install `py2-ipaddress` to make plugin work. -The module will not work If no configuration is given. - ---- - -# linux\_power\_supply - -This module monitors variosu metrics reported by power supply drivers -on Linux. This allows tracking and alerting on things like remaining -battery capacity. - -Depending on the uderlying driver, it may provide the following charts -and metrics: - -1. Capacity: The power supply capacity expressed as a percentage. - * capacity\_now - -2. Charge: The charge for the power supply, expressed as microamphours. - * charge\_full\_design - * charge\_full - * charge\_now - * charge\_empty - * charge\_empty\_design - -3. 
Energy: The energy for the power supply, expressed as microwatthours. - * energy\_full\_design - * energy\_full - * energy\_now - * energy\_empty - * energy\_empty\_design - -2. Voltage: The voltage for the power supply, expressed as microvolts. - * voltage\_max\_design - * voltage\_max - * voltage\_now - * voltage\_min - * voltage\_min\_design - -### configuration - -Sample: - -```yaml -battery: - supply: 'BAT0' - charts: 'capacity charge energy voltage' -``` - -The `supply` key specifies the name of the power supply device to monitor. -You can use `ls /sys/class/power_supply` to get a list of such devices -on your system. - -The `charts` key is a space separated list of which charts to try -to display. It defaults to trying to display everything. - -### notes - -* Most drivers provide at least the first chart. Battery powered ACPI -compliant systems (like most laptops) provide all but the third, but do -not provide all of the metrics for each chart. - -* Current, energy, and voltages are reported with a _very_ high precision -by the power\_supply framework. Usually, this is far higher than the -actual hardware supports reporting, so expect to see changes in these -charts jump instead of scaling smoothly. - -* If `max` or `full` attribute is defined by the driver, but not a -corresponding `min or `empty` attribute, then netdata will still provide -the corresponding `min` or `empty`, which will then always read as zero. -This way, alerts which match on these will still work. - ---- - -# litespeed - -Module monitor litespeed web server performance metrics. - -It produces: - -1. **Network Throughput HTTP** in kilobits/s - * in - * out - -2. **Network Throughput HTTPS** in kilobits/s - * in - * out - -3. **Connections HTTP** in connections - * free - * used - -4. **Connections HTTPS** in connections - * free - * used - -5. **Requests** in requests/s - * requests - -6. **Requests In Processing** in requests - * processing - -7. 
**Public Cache Hits** in hits/s - * hits - -8. **Private Cache Hits** in hits/s - * hits - -9. **Static Hits** in hits/s - * hits - - -### configuration -```yaml -local: - path : 'PATH' -``` - -If no configuration is given, module will use "/tmp/lshttpd/". - ---- - -# logind - -This module monitors active sessions, users, and seats tracked by systemd-logind or elogind. - -It provides the following charts: - -1. **Sessions** Tracks the total number of sessions. - * Graphical: Local graphical sessions (running X11, or Wayland, or something else). - * Console: Local console sessions. - * Remote: Remote sessions. - -2. **Users** Tracks total number of unique user logins of each type. - * Graphical - * Console - * Remote - -3. **Seats** Total number of seats in use. - * Seats - -### configuration - -This module needs no configuration. Just make sure the netdata user -can run the `loginctl` command and get a session list without having to -specify a path. - -This will work with any command that can output data in the _exact_ -same format as `loginctl list-sessions --no-legend`. If you have some -other command you want to use that outputs data in this format, you can -specify it using the `command` key like so: - -```yaml -command: '/path/to/other/command' -``` - -### notes - -* This module's ability to track logins is dependent on what PAM services -are configured to register sessions with logind. In particular, for -most systems, it will only track TTY logins, local desktop logins, -and logins through remote shell connections. - -* The users chart counts _usernames_ not UID's. This is potentially -important in configurations where multiple users have the same UID. - -* The users chart counts any given user name up to once for _each_ type -of login. So if the same user has a graphical and a console login on a -system, they will show up once in the graphical count, and once in the -console count. 
- -* Because the data collection process is rather expensive, this plugin -is currently disabled by default, and needs to be explicitly enabled in -`/etc/netdata/python.d.conf` before it will run. - ---- - -# mdstat - -Module monitor /proc/mdstat - -It produces: - -1. **Health** Number of failed disks in every array (aggregate chart). - -2. **Disks stats** - * total (number of devices array ideally would have) - * inuse (number of devices currently are in use) - -3. **Current status** - * resync in percent - * recovery in percent - * reshape in percent - * check in percent - -4. **Operation status** (if resync/recovery/reshape/check is active) - * finish in minutes - * speed in megabytes/s - -### configuration -No configuration is needed. - ---- - -# megacli - -Module collects adapter, physical drives and battery stats. - -**Requirements:** - * `netdata` user needs to be able to be able to sudo the `megacli` program without password - -To grab stats it executes: - * `sudo -n megacli -LDPDInfo -aAll` - * `sudo -n megacli -AdpBbuCmd -a0` - - -It produces: - -1. **Adapter State** - -2. **Physical Drives Media Errors** - -3. **Physical Drives Predictive Failures** - -4. **Battery Relative State of Charge** - -5. **Battery Cycle Count** - -### configuration -Battery stats disabled by default in the module configuration file. - ---- - -# memcached - -Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats). - -1. **Network** in kilobytes/s - * read - * written - -2. **Connections** per second - * current - * rejected - * total - -3. **Items** in cluster - * current - * total - -4. **Evicted and Reclaimed** items - * evicted - * reclaimed - -5. **GET** requests/s - * hits - * misses - -6. **GET rate** rate in requests/s - * rate - -7. **SET rate** rate in requests/s - * rate - -8. **DELETE** requests/s - * hits - * misses - -9. **CAS** requests/s - * hits - * misses - * bad value - -10. 
**Increment** requests/s - * hits - * misses - -11. **Decrement** requests/s - * hits - * misses - -12. **Touch** requests/s - * hits - * misses - -13. **Touch rate** rate in requests/s - * rate - -### configuration - -Sample: - -```yaml -localtcpip: - name : 'local' - host : '127.0.0.1' - port : 24242 -``` - -If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address. - ---- - -# mongodb - -Module monitor mongodb performance and health metrics - -**Requirements:** - * `python-pymongo` package. - -You need to install it manually. - - -Number of charts depends on mongodb version, storage engine and other features (replication): - -1. **Read requests**: - * query - * getmore (operation the cursor executes to get additional data from query) - -2. **Write requests**: - * insert - * delete - * update - -3. **Active clients**: - * readers (number of clients with read operations in progress or queued) - * writers (number of clients with write operations in progress or queued) - -4. **Journal transactions**: - * commits (count of transactions that have been written to the journal) - -5. **Data written to the journal**: - * volume (volume of data) - -6. **Background flush** (MMAPv1): - * average ms (average time taken by flushes to execute) - * last ms (time taken by the last flush) - -8. **Read tickets** (WiredTiger): - * in use (number of read tickets in use) - * available (number of available read tickets remaining) - -9. **Write tickets** (WiredTiger): - * in use (number of write tickets in use) - * available (number of available write tickets remaining) - -10. **Cursors**: - * opened (number of cursors currently opened by MongoDB for clients) - * timedOut (number of cursors that have timed) - * noTimeout (number of open cursors with timeout disabled) - -11. 
**Connections**: - * connected (number of clients currently connected to the database server) - * unused (number of unused connections available for new clients) - -12. **Memory usage metrics**: - * virtual - * resident (amount of memory used by the database process) - * mapped - * non mapped - -13. **Page faults**: - * page faults (number of times MongoDB had to request from disk) - -14. **Cache metrics** (WiredTiger): - * percentage of bytes currently in the cache (amount of space taken by cached data) - * percantage of tracked dirty bytes in the cache (amount of space taken by dirty data) - -15. **Pages evicted from cache** (WiredTiger): - * modified - * unmodified - -16. **Queued requests**: - * readers (number of read request currently queued) - * writers (number of write request currently queued) - -17. **Errors**: - * msg (number of message assertions raised) - * warning (number of warning assertions raised) - * regular (number of regular assertions raised) - * user (number of assertions corresponding to errors generated by users) - -18. **Storage metrics** (one chart for every database) - * dataSize (size of all documents + padding in the database) - * indexSize (size of all indexes in the database) - * storageSize (size of all extents in the database) - -19. **Documents in the database** (one chart for all databases) - * documents (number of objects in the database among all the collections) - -20. **tcmalloc metrics** - * central cache free - * current total thread cache - * pageheap free - * pageheap unmapped - * thread cache free - * transfer cache free - * heap size - -21. **Commands total/failed rate** - * count - * createIndex - * delete - * eval - * findAndModify - * insert - -22. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode) - * Global lock - * Database lock - * Collection lock - * Metadata lock - * oplog lock - -23. **Replica set members state** - * state - -24. 
**Oplog window** - * window (interval of time between the oldest and the latest entries in the oplog) - -25. **Replication lag** - * member (time when last entry from the oplog was applied for every member) - -26. **Replication set member heartbeat latency** - * member (time when last heartbeat was received from replica set member) - - -### configuration - -Sample: - -```yaml -local: - name : 'local' - host : '127.0.0.1' - port : 27017 - user : 'netdata' - pass : 'netdata' - -``` - -If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address - ---- - -# monit - -Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks). - -1. **Filesystems** - * Filesystems - * Directories - * Files - * Pipes - -2. **Applications** - * Processes (+threads/childs) - * Programs - -3. **Network** - * Hosts (+latency) - * Network interfaces - -### configuration - -Sample: - -```yaml -local: - name : 'local' - url : 'http://localhost:2812' - user: : admin - pass: : monit -``` - -If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`. - ---- - -# mysql - -Module monitors one or more mysql servers - -**Requirements:** - * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower) - -It will produce following charts (if data is available): - -1. **Bandwidth** in kbps - * in - * out - -2. **Queries** in queries/sec - * queries - * questions - * slow queries - -3. 
**Operations** in operations/sec - * opened tables - * flush - * commit - * delete - * prepare - * read first - * read key - * read next - * read prev - * read random - * read random next - * rollback - * save point - * update - * write - -4. **Table Locks** in locks/sec - * immediate - * waited - -5. **Select Issues** in issues/sec - * full join - * full range join - * range - * range check - * scan - -6. **Sort Issues** in issues/sec - * merge passes - * range - * scan - -### configuration - -You can provide, per server, the following: - -1. username which have access to database (defaults to 'root') -2. password (defaults to none) -3. mysql my.cnf configuration file -4. mysql socket (optional) -5. mysql host (ip or hostname) -6. mysql port (defaults to 3306) - -Here is an example for 3 servers: - -```yaml -update_every : 10 -priority : 90100 -retries : 5 - -local: - 'my.cnf' : '/etc/mysql/my.cnf' - priority : 90000 - -local_2: - user : 'root' - pass : 'blablablabla' - socket : '/var/run/mysqld/mysqld.sock' - update_every : 1 - -remote: - user : 'admin' - pass : 'bla' - host : 'example.org' - port : 9000 - retries : 20 -``` - -If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root` - ---- - -# nginx - -This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote. - -**Requirements:** - * nginx with configured 'ngx_http_stub_status_module' - * 'location /stub_status' - -Example nginx configuration can be found in 'python.d/nginx.conf' - -It produces following charts: - -1. **Active Connections** - * active - -2. **Requests** in requests/s - * requests - -3. **Active Connections by Status** - * reading - * writing - * waiting - -4. 
**Connections Rate** in connections/s - * accepts - * handled - -### configuration - -Needs only `url` to server's `stub_status` - -Here is an example for local server: - -```yaml -update_every : 10 -priority : 90100 - -local: - url : 'http://localhost/stub_status' - retries : 10 -``` - -Without configuration, module attempts to connect to `http://localhost/stub_status` - ---- - -# nginx_plus - -This module will monitor one or more nginx_plus servers depending on configuration. -Servers can be either local or remote. - -Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf' - -It produces following charts: - -1. **Requests total** in requests/s - * total - -2. **Requests current** in requests - * current - -3. **Connection Statistics** in connections/s - * accepted - * dropped - -4. **Workers Statistics** in workers - * idle - * active - -5. **SSL Handshakes** in handshakes/s - * successful - * failed - -6. **SSL Session Reuses** in sessions/s - * reused - -7. **SSL Memory Usage** in percent - * usage - -8. **Processes** in processes - * respawned - -For every server zone: - -1. **Processing** in requests - * processing - -2. **Requests** in requests/s - * requests - -3. **Responses** in requests/s - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - -4. **Traffic** in kilobits/s - * received - * sent - -For every upstream: - -1. **Peers Requests** in requests/s - * peer name (dimension per peer) - -2. **All Peers Responses** in responses/s - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - -3. **Peer Responses** in requests/s (for every peer) - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - -4. **Peers Connections** in active - * peer name (dimension per peer) - -5. **Peers Connections Usage** in percent - * peer name (dimension per peer) - -6. **All Peers Traffic** in KB - * received - * sent - -7. **Peer Traffic** in KB/s (for every peer) - * received - * sent - -8. **Peer Timings** in ms (for every peer) - * header - * response - -9. 
**Memory Usage** in percent - * usage - -10. **Peers Status** in state - * peer name (dimension per peer) - -11. **Peers Total Downtime** in seconds - * peer name (dimension per peer) - -For every cache: - -1. **Traffic** in KB - * served - * written - * bypass - -2. **Memory Usage** in percent - * usage - -### configuration - -Needs only `url` to server's `status` - -Here is an example for local server: - -```yaml -local: - url : 'http://localhost/status' -``` - -Without configuration, module fail to start. - ---- - -# nsd - -Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics. - -**Requirements:** - * Version of `nsd` must be 4.0+ - * Netdata must have permissions to run `nsd-control stats_noreset` - -It produces: - -1. **Queries** - * queries - -2. **Zones** - * master - * slave - -3. **Protocol** - * udp - * udp6 - * tcp - * tcp6 - -4. **Query Type** - * A - * NS - * CNAME - * SOA - * PTR - * HINFO - * MX - * NAPTR - * TXT - * AAAA - * SRV - * ANY - -5. **Transfer** - * NOTIFY - * AXFR - -6. **Return Code** - * NOERROR - * FORMERR - * SERVFAIL - * NXDOMAIN - * NOTIMP - * REFUSED - * YXDOMAIN - - -Configuration is not needed. - ---- - -# ntpd - -Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html). - -**Requirements:** - * Version: `NTPv4` - * Local interrogation allowed in `/etc/ntp.conf` (default): - -``` -# Local users may interrogate the ntp server more closely. -restrict 127.0.0.1 -restrict ::1 -``` - -It produces: - -1. system - * offset - * jitter - * frequency - * delay - * dispersion - * stratum - * tc - * precision - -2. 
peers - * offset - * delay - * dispersion - * jitter - * rootdelay - * rootdispersion - * stratum - * hmode - * pmode - * hpoll - * ppoll - * precision - -**configuration** - -Sample: - -```yaml -update_every: 10 - -host: 'localhost' -port: '123' -show_peers: yes -# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16 -peer_filter: '(127\..*)|(192\.168\..*)' -# check for new/changed peers every 60 updates -peer_rescan: 60 -``` - -Sample (multiple jobs): - -Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`. - -```yaml -local: - host: 'localhost' - -otherhost: - host: 'otherhost' -``` - -If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the systemvars. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default, use `peer_filter: ''` to show all peers. - ---- - -# ovpn_status_log - -Module monitor openvpn-status log file. - -**Requirements:** - - * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files - so that multiple instances do not overwrite each other's output files. - - * Make sure NETDATA USER CAN READ openvpn-status.log - - * Update_every interval MUST MATCH interval on which OpenVPN writes operational status to log file. - -It produces: - -1. **Users** OpenVPN active users - * users - -2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s - * in - * out - -### configuration - -Sample: - -```yaml -default - log_path : '/var/log/openvpn-status.log' -``` - ---- - -# phpfpm - -This module will monitor one or more php-fpm instances depending on configuration. - -**Requirements:** - * php-fpm with enabled `status` page - * access to `status` page via web server - -It produces following charts: - -1. 
**Active Connections** - * active - * maxActive - * idle - -2. **Requests** in requests/s - * requests - -3. **Performance** - * reached - * slow - -### configuration - -Needs only `url` to server's `status` - -Here is an example for local instance: - -```yaml -update_every : 3 -priority : 90100 - -local: - url : 'http://localhost/status' - retries : 10 -``` - -Without configuration, module attempts to connect to `http://localhost/status` - ---- - -# portcheck - -Module monitors a remote TCP service. - -Following charts are drawn per host: - -1. **Latency** ms - * Time required to connect to a TCP port. - Displays latency in 0.1 ms resolution. If the connection failed, the value is missing. - -2. **Status** boolean - * Connection successful - * Could not create socket: possible DNS problems - * Connection refused: port not listening or blocked - * Connection timed out: host or port unreachable - - -### configuration - -```yaml -server: - host: 'dns or ip' # required - port: 22 # required - timeout: 1 # optional - update_every: 1 # optional -``` - -### notes - - * The error chart is intended for alarms, badges or for access via API. - * A system/service/firewall might block netdata's access if a portscan or - similar is detected. - * Currently, the accuracy of the latency is low and should be used as reference only. - ---- - -# postfix - -Simple module executing `postfix -p` to grab postfix queue. - -It produces only two charts: - -1. **Postfix Queue Emails** - * emails - -2. **Postfix Queue Emails Size** in KB - * size - -Configuration is not needed. - ---- - -# postgres - -Module monitors one or more postgres servers. - -**Requirements:** - - * `python-psycopg2` package. You have to install it manually. - -Following charts are drawn: - -1. **Database size** MB - * size - -2. **Current Backend Processes** processes - * active - -3. **Write-Ahead Logging Statistics** files/s - * total - * ready - * done - -4. 
**Checkpoints** writes/s - * scheduled - * requested - -5. **Current connections to db** count - * connections - -6. **Tuples returned from db** tuples/s - * sequential - * bitmap - -7. **Tuple reads from db** reads/s - * disk - * cache - -8. **Transactions on db** transactions/s - * committed - * rolled back - -9. **Tuples written to db** writes/s - * inserted - * updated - * deleted - * conflicts - -10. **Locks on db** count per type - * locks - -### configuration - -```yaml -socket: - name : 'socket' - user : 'postgres' - database : 'postgres' - -tcp: - name : 'tcp' - user : 'postgres' - database : 'postgres' - host : 'localhost' - port : 5432 -``` - -When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`. - ---- - -# powerdns - -Module monitor powerdns performance and health metrics. - -Powerdns charts: - -1. **Queries and Answers** - * udp-queries - * udp-answers - * tcp-queries - * tcp-answers - -2. **Cache Usage** - * query-cache-hit - * query-cache-miss - * packetcache-hit - * packetcache-miss - -3. **Cache Size** - * query-cache-size - * packetcache-size - * key-cache-size - * meta-cache-size - -4. **Latency** - * latency - - Powerdns Recursor charts: - - 1. **Questions In** - * questions - * ipv6-questions - * tcp-queries - -2. **Questions Out** - * all-outqueries - * ipv6-outqueries - * tcp-outqueries - * throttled-outqueries - -3. **Answer Times** - * answers-slow - * answers0-1 - * answers1-10 - * answers10-100 - * answers100-1000 - -4. **Timeouts** - * outgoing-timeouts - * outgoing4-timeouts - * outgoing6-timeouts - -5. **Drops** - * over-capacity-drops - -6. **Cache Usage** - * cache-hits - * cache-misses - * packetcache-hits - * packetcache-misses - -7. 
**Cache Size** - * cache-entries - * packetcache-entries - * negcache-entries - -### configuration - -```yaml -local: - name : 'local' - url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics' - header : - X-API-Key: 'change_me' -``` - ---- - -# puppet - -Monitor status of Puppet Server and Puppet DB. - -Following charts are drawn: - -1. **JVM Heap** - * committed (allocated from OS) - * used (actual use) -2. **JVM Non-Heap** - * committed (allocated from OS) - * used (actual use) -3. **CPU Usage** - * execution - * GC (taken by garbage collection) -4. **File Descriptors** - * max - * used - - -### configuration - -```yaml -puppetdb: - url: 'https://fqdn.example.com:8081' - tls_cert_file: /path/to/client.crt - tls_key_file: /path/to/client.key - autodetection_retry: 1 - retries: 3600 - -puppetserver: - url: 'https://fqdn.example.com:8140' - autodetection_retry: 1 - retries: 3600 -``` - -When no configuration is given then `https://fqdn.example.com:8140` is -tried without any retries. - -### notes - -* Exact Fully Qualified Domain Name of the node should be used. -* Usually Puppet Server/DB startup time is VERY long. So, there should - be quite reasonable retry count. -* Secure PuppetDB config may require client certificate. Not applies - to default PuppetDB configuration though. - ---- - -# rabbitmq - -Module monitor rabbitmq performance and health metrics. - -Following charts are drawn: - -1. **Queued Messages** - * ready - * unacknowledged - -2. **Message Rates** - * ack - * redelivered - * deliver - * publish - -3. **Global Counts** - * channels - * consumers - * connections - * queues - * exchanges - -4. **File Descriptors** - * used descriptors - -5. **Socket Descriptors** - * used descriptors - -6. **Erlang processes** - * used processes - -7. **Erlang run queue** - * Erlang run queue - -8. **Memory** - * free memory in megabytes - -9. 
**Disk Space** - * free disk space in gigabytes - -### configuration - -```yaml -socket: - name : 'local' - host : '127.0.0.1' - port : 15672 - user : 'guest' - pass : 'guest' - -``` - -When no configuration file is found, module tries to connect to: `localhost:15672`. - ---- - -# redis - -Get INFO data from redis instance. - -Following charts are drawn: - -1. **Operations** per second - * operations - -2. **Hit rate** in percent - * rate - -3. **Memory utilization** in kilobytes - * total - * lua - -4. **Database keys** - * lines are creates dynamically based on how many databases are there - -5. **Clients** - * connected - * blocked - -6. **Slaves** - * connected - -### configuration - -```yaml -socket: - name : 'local' - socket : '/var/lib/redis/redis.sock' - -localhost: - name : 'local' - host : 'localhost' - port : 6379 -``` - -When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`. - ---- - -# rethinkdb - -Module monitor rethinkdb health metrics. - -Following charts are drawn: - -1. **Connected Servers** - * connected - * missing - -2. **Active Clients** - * active - -3. **Queries** per second - * queries - -4. **Documents** per second - * documents - -### configuration - -```yaml - -localhost: - name : 'local' - host : '127.0.0.1' - port : 28015 - user : "user" - password : "pass" -``` - -When no configuration file is found, module tries to connect to `127.0.0.1:28015`. - ---- - -# samba - -Performance metrics of Samba file sharing. - -It produces the following charts: - -1. **Syscall R/Ws** in kilobytes/s - * sendfile - * recvfle - -2. **Smb2 R/Ws** in kilobytes/s - * readout - * writein - * readin - * writeout - -3. **Smb2 Create/Close** in operations/s - * create - * close - -4. **Smb2 Info** in operations/s - * getinfo - * setinfo - -5. **Smb2 Find** in operations/s - * find - -6. **Smb2 Notify** in operations/s - * notify - -7. 
**Smb2 Lesser Ops** as counters - * tcon - * negprot - * tdis - * cancel - * logoff - * flush - * lock - * keepalive - * break - * sessetup - -### configuration - -Requires that smbd has been compiled with profiling enabled. Also required -that `smbd` was started either with the `-P 1` option or inside `smb.conf` -using `smbd profiling level`. - -This plugin uses `smbstatus -P` which can only be executed by root. It uses -sudo and assumes that it is configured such that the `netdata` user can -execute smbstatus as root without password. - -For example: - - netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P - -```yaml -update_every : 5 # update frequency -``` - ---- - -# sensors - -System sensors information. - -Charts are created dynamically. - -### configuration - -For detailed configuration information please read [`sensors.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/sensors.conf) file. - -### possible issues - -There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed. -We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827). -Please join this discussion for help. - ---- - -# spigotmc - -This module does some really basic monitoring for Spigot Minecraft servers. - -It provides two charts, one tracking server-side ticks-per-second in -1, 5 and 15 minute averages, and one tracking the number of currently -active users. - -This is not compatible with Spigot plugins which change the format of -the data returned by the `tps` or `list` console commands. - -### configuration - -```yaml -host: localhost -port: 25575 -password: pass -``` - -By default, a connection to port 25575 on the local system is attempted with an empty password. - ---- - -# springboot - -This module will monitor one or more Java Spring-boot applications depending on configuration. - -It produces following charts: - -1. 
**Response Codes** in requests/s - * 1xx - * 2xx - * 3xx - * 4xx - * 5xx - * others - -2. **Threads** - * daemon - * total - -3. **GC Time** in milliseconds and **GC Operations** in operations/s - * Copy - * MarkSweep - * ... - -4. **Heap Mmeory Usage** in KB - * used - * committed - -### configuration - -Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration. - ---- - -# squid - -This module will monitor one or more squid instances depending on configuration. - -It produces following charts: - -1. **Client Bandwidth** in kilobits/s - * in - * out - * hits - -2. **Client Requests** in requests/s - * requests - * hits - * errors - -3. **Server Bandwidth** in kilobits/s - * in - * out - -4. **Server Requests** in requests/s - * requests - * errors - -### configuration - -```yaml -priority : 50000 - -local: - request : 'cache_object://localhost:3128/counters' - host : 'localhost' - port : 3128 -``` - -Without any configuration module will try to autodetect where squid presents its `counters` data - ---- - -# smartd_log - -Module monitor `smartd` log files to collect HDD/SSD S.M.A.R.T attributes. - -It produces following charts (you can add additional attributes in the module configuration file): - -1. **Read Error Rate** attribute 1 - -2. **Start/Stop Count** attribute 4 - -3. **Reallocated Sectors Count** attribute 5 - -4. **Seek Error Rate** attribute 7 - -5. **Power-On Hours Count** attribute 9 - -6. **Power Cycle Count** attribute 12 - -7. **Load/Unload Cycles** attribute 193 - -8. **Temperature** attribute 194 - -9. **Current Pending Sectors** attribute 197 - -10. **Off-Line Uncorrectable** attribute 198 - -11. **Write Error Rate** attribute 200 - -### configuration - -```yaml -local: - log_path : '/var/log/smartd/' -``` - -If no configuration is given, module will attempt to read log files in /var/log/smartd/ directory. 
- ---- - -# tomcat - -Present tomcat containers memory utilization. - -Charts: - -1. **Requests** per second - * accesses - -2. **Volume** in KB/s - * volume - -3. **Threads** - * current - * busy - -4. **JVM Free Memory** in MB - * jvm - -### configuration - -```yaml -localhost: - name : 'local' - url : 'http://127.0.0.1:8080/manager/status?XML=true' - user : 'tomcat_username' - pass : 'secret_tomcat_password' -``` - -Without configuration, module attempts to connect to `http://localhost:8080/manager/status?XML=true`, without any credentials. -So it will probably fail. - ---- - -# Traefik - -Module uses the `health` API to provide statistics. - -It produces: - -1. **Responses** by statuses - * success (1xx, 2xx, 304) - * error (5xx) - * redirect (3xx except 304) - * bad (4xx) - * other (all other responses) - -2. **Responses** by codes - * 2xx (successful) - * 5xx (internal server errors) - * 3xx (redirect) - * 4xx (bad) - * 1xx (informational) - * other (non-standart responses) - -3. **Detailed Response Codes** requests/s (number of responses for each response code family individually) - -4. **Requests**/s - * request statistics - -5. **Total response time** - * sum of all response time - -6. **Average response time** - -7. **Average response time per iteration** - -8. **Uptime** - * Traefik server uptime - -### configuration - -Needs only `url` to server's `health` - -Here is an example for local server: - -```yaml -update_every : 1 -priority : 60000 - -local: - url : 'http://localhost:8080/health' - retries : 10 -``` - -Without configuration, module attempts to connect to `http://localhost:8080/health`. - ---- - -# Unbound - -Monitoring uses the remote control interface to fetch statistics. - -Provides the following charts: - -1. **Queries Processed** - * Ratelimited - * Cache Misses - * Cache Hits - * Expired - * Prefetched - * Recursive - -2. 
**Request List** - * Average Size - * Max Size - * Overwritten Requests - * Overruns - * Current Size - * User Requests - -3. **Recursion Timings** - * Average recursion processing time - * Median recursion processing time - -If extended stats are enabled, also provides: - -4. **Cache Sizes** - * Message Cache - * RRset Cache - * Infra Cache - * DNSSEC Key Cache - * DNSCrypt Shared Secret Cache - * DNSCrypt Nonce Cache - -### configuration - -Unbound must be manually configured to enable the remote-control protocol. -Check the Unbound documentation for info on how to do this. Additionally, -if you want to take advantage of the autodetection this plugin offers, -you will need to make sure your `unbound.conf` file only uses spaces for -indentation (the default config shipped by most distributions uses tabs -instead of spaces). - -Once you have the Unbound control protocol enabled, you need to make sure -that either the certificate and key are readable by Netdata (if you're -using the regular control interface), or that the socket is accessible -to Netdata (if you're using a UNIX socket for the contorl interface). - -By default, for the local system, everything can be auto-detected -assuming Unbound is configured correctly and has been told to listen -on the loopback interface or a UNIX socket. This is done by looking -up info in the Unbound config file specified by the `ubconf` key. - -To enable extended stats for a given job, add `extended: yes` to the -definition. - -You can also enable per-thread charts for a given job by adding -`per_thread: yes` to the definition. Note that the numbe rof threads -is only checked on startup. - -A basic local configuration with extended statistics and per-thread -charts looks like this: - -```yaml -local: - ubconf: /etc/unbound/unbound.conf - extended: yes - per_thread: yes -``` - -While it's a bit more complicated to set up correctly, it is recommended -that you use a UNIX socket as it provides far better performance. 
- ---- - -# varnish cache - -Module uses the `varnishstat` command to provide varnish cache statistics. - -It produces: - -1. **Connections Statistics** in connections/s - * accepted - * dropped - -2. **Client Requests** in requests/s - * received - -3. **All History Hit Rate Ratio** in percent - * hit - * miss - * hitpass - -4. **Current Poll Hit Rate Ratio** in percent - * hit - * miss - * hitpass - -5. **Expired Objects** in expired/s - * objects - -6. **Least Recently Used Nuked Objects** in nuked/s - * objects - - -7. **Number Of Threads In All Pools** in threads - * threads - -8. **Threads Statistics** in threads/s - * created - * failed - * limited - -9. **Current Queue Length** in requests - * in queue - -10. **Backend Connections Statistics** in connections/s - * successful - * unhealthy - * reused - * closed - * resycled - * failed - -10. **Requests To The Backend** in requests/s - * received - -11. **ESI Statistics** in problems/s - * errors - * warnings - -12. **Memory Usage** in MB - * free - * allocated - -13. **Uptime** in seconds - * uptime - - -### configuration - -No configuration is needed. - ---- - -# w1sensor - -Data from 1-Wire sensors. -On Linux these are supported by the wire, w1_gpio, and w1_therm modules. -Currently temperature sensors are supported and automatically detected. - -Charts are created dynamically based on the number of detected sensors. - -### configuration - -For detailed configuration information please read [`w1sensor.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/w1sensor.conf) file. - ---- - -# web_log - -Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics. - -It produces following charts: - -1. **Response by type** requests/s - * success (1xx, 2xx, 304) - * error (5xx) - * redirect (3xx except 304) - * bad (4xx) - * other (all other responses) - -2. 
**Response by code family** requests/s - * 1xx (informational) - * 2xx (successful) - * 3xx (redirect) - * 4xx (bad) - * 5xx (internal server errors) - * other (non-standart responses) - * unmatched (the lines in the log file that are not matched) - -3. **Detailed Response Codes** requests/s (number of responses for each response code family individually) - -4. **Bandwidth** KB/s - * received (bandwidth of requests) - * send (bandwidth of responses) - -5. **Timings** ms (request processing time) - * min (bandwidth of requests) - * max (bandwidth of responses) - * average (bandwidth of responses) - -6. **Request per url** requests/s (configured by user) - -7. **Http Methods** requests/s (requests per http method) - -8. **Http Versions** requests/s (requests per http version) - -9. **IP protocols** requests/s (requests per ip protocol version) - -10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration) - -11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata) - - -### configuration - -```yaml -nginx_log: - name : 'nginx_log' - path : '/var/log/nginx/access.log' - -apache_log: - name : 'apache_log' - path : '/var/log/apache/other_vhosts_access.log' - categories: - cacti : 'cacti.*' - observium : 'observium' -``` - -Module has preconfigured jobs for nginx, apache and gunicorn on various distros. 
- ---- diff --git a/registry/Makefile.am b/registry/Makefile.am new file mode 100644 index 0000000000..1cb69ed99a --- /dev/null +++ b/registry/Makefile.am @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/registry/README.md b/registry/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/registry/registry.c b/registry/registry.c similarity index 99% rename from src/registry/registry.c rename to registry/registry.c index d2f6a7173a..4f97eb58fd 100644 --- a/src/registry/registry.c +++ b/registry/registry.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" #define REGISTRY_STATUS_OK "ok" diff --git a/src/registry/registry.h b/registry/registry.h similarity index 99% rename from src/registry/registry.h rename to registry/registry.h index 69185f7c43..ab36de014f 100644 --- a/src/registry/registry.h +++ b/registry/registry.h @@ -49,7 +49,7 @@ #ifndef NETDATA_REGISTRY_H #define NETDATA_REGISTRY_H 1 -#include "../common.h" +#include "../daemon/common.h" #define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id" diff --git a/src/registry/registry_db.c b/registry/registry_db.c similarity index 99% rename from src/registry/registry_db.c rename to registry/registry_db.c index e37d626a04..d8e2bbd8dd 100644 --- a/src/registry/registry_db.c +++ b/registry/registry_db.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" int registry_db_should_be_saved(void) { diff --git a/src/registry/registry_init.c b/registry/registry_init.c similarity index 99% rename from src/registry/registry_init.c rename to registry/registry_init.c index e9ca97ed00..d3e0420d2c 100644 --- a/src/registry/registry_init.c +++ 
b/registry/registry_init.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" int registry_init(void) { diff --git a/src/registry/registry_internals.c b/registry/registry_internals.c similarity index 99% rename from src/registry/registry_internals.c rename to registry/registry_internals.c index 34f8e8a6dc..b54b901427 100644 --- a/src/registry/registry_internals.c +++ b/registry/registry_internals.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" struct registry registry; diff --git a/src/registry/registry_internals.h b/registry/registry_internals.h similarity index 100% rename from src/registry/registry_internals.h rename to registry/registry_internals.h diff --git a/src/registry/registry_log.c b/registry/registry_log.c similarity index 99% rename from src/registry/registry_log.c rename to registry/registry_log.c index 36ebf16390..e0e58ede35 100644 --- a/src/registry/registry_log.c +++ b/registry/registry_log.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) { diff --git a/src/registry/registry_machine.c b/registry/registry_machine.c similarity index 99% rename from src/registry/registry_machine.c rename to registry/registry_machine.c index 071fe2ac02..8dbeb8ea6b 100644 --- a/src/registry/registry_machine.c +++ b/registry/registry_machine.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" // ---------------------------------------------------------------------------- diff --git a/src/registry/registry_machine.h b/registry/registry_machine.h similarity index 100% rename from 
src/registry/registry_machine.h rename to registry/registry_machine.h diff --git a/src/registry/registry_person.c b/registry/registry_person.c similarity index 99% rename from src/registry/registry_person.c rename to registry/registry_person.c index 59086418d3..53e3f47f42 100644 --- a/src/registry/registry_person.c +++ b/registry/registry_person.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" // ---------------------------------------------------------------------------- diff --git a/src/registry/registry_person.h b/registry/registry_person.h similarity index 100% rename from src/registry/registry_person.h rename to registry/registry_person.h diff --git a/src/registry/registry_url.c b/registry/registry_url.c similarity index 99% rename from src/registry/registry_url.c rename to registry/registry_url.c index df5dfe3739..6a71064588 100644 --- a/src/registry/registry_url.c +++ b/registry/registry_url.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "../daemon/common.h" #include "registry_internals.h" // ---------------------------------------------------------------------------- diff --git a/src/registry/registry_url.h b/registry/registry_url.h similarity index 100% rename from src/registry/registry_url.h rename to registry/registry_url.h diff --git a/src/Makefile.am b/src/Makefile.am deleted file mode 100644 index 02b0f6eaec..0000000000 --- a/src/Makefile.am +++ /dev/null @@ -1,363 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in - -SUBDIRS = \ - api \ - backends \ - database \ - health \ - libnetdata \ - plugins \ - registry \ - streaming \ - webserver \ - $(NULL) - -AM_CFLAGS = \ - $(OPTIONAL_MATH_CFLAGS) \ - $(OPTIONAL_NFACCT_CLFAGS) \ - $(OPTIONAL_ZLIB_CFLAGS) \ - $(OPTIONAL_UUID_CFLAGS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - 
$(OPTIONAL_IPMIMONITORING_CFLAGS) \ - $(NULL) - -sbin_PROGRAMS = -dist_cache_DATA = .keep -dist_varlib_DATA = .keep -dist_registry_DATA = .keep -dist_log_DATA = .keep -plugins_PROGRAMS = - -LIBNETDATA_FILES = \ - libnetdata/adaptive_resortable_list.c \ - libnetdata/adaptive_resortable_list.h \ - libnetdata/appconfig.c \ - libnetdata/appconfig.h \ - libnetdata/avl.c \ - libnetdata/avl.h \ - libnetdata/clocks.c \ - libnetdata/clocks.h \ - libnetdata/common.c \ - libnetdata/dictionary.c \ - libnetdata/dictionary.h \ - libnetdata/eval.c \ - libnetdata/eval.h \ - libnetdata/inlined.h \ - libnetdata/libnetdata.h \ - libnetdata/locks.c \ - libnetdata/locks.h \ - libnetdata/log.c \ - libnetdata/log.h \ - libnetdata/popen.c \ - libnetdata/popen.h \ - libnetdata/procfile.c \ - libnetdata/procfile.h \ - libnetdata/os.c \ - libnetdata/os.h \ - libnetdata/simple_pattern.c \ - libnetdata/simple_pattern.h \ - libnetdata/socket.c \ - libnetdata/socket.h \ - libnetdata/statistical.c \ - libnetdata/statistical.h \ - libnetdata/storage_number.c \ - libnetdata/storage_number.h \ - libnetdata/threads.c \ - libnetdata/threads.h \ - libnetdata/web_buffer.c \ - libnetdata/web_buffer.h \ - libnetdata/url.c \ - libnetdata/url.h \ - $(NULL) - -APPS_PLUGIN_FILES = \ - plugins/apps.plugin/apps_plugin.c \ - $(LIBNETDATA_FILES) \ - $(NULL) - -CHECKS_PLUGIN_FILES = \ - plugins/checks.plugin/plugin_checks.c \ - plugins/checks.plugin/plugin_checks.h \ - $(NULL) - -FREEBSD_PLUGIN_FILES = \ - plugins/freebsd.plugin/plugin_freebsd.c \ - plugins/freebsd.plugin/plugin_freebsd.h \ - plugins/freebsd.plugin/freebsd_sysctl.c \ - plugins/freebsd.plugin/freebsd_getmntinfo.c \ - plugins/freebsd.plugin/freebsd_getifaddrs.c \ - plugins/freebsd.plugin/freebsd_devstat.c \ - plugins/freebsd.plugin/freebsd_kstat_zfs.c \ - plugins/freebsd.plugin/freebsd_ipfw.c \ - plugins/linux-proc.plugin/zfs_common.c \ - plugins/linux-proc.plugin/zfs_common.h \ - $(NULL) - -HEALTH_PLUGIN_FILES = \ - health/health.c \ - 
health/health.h \ - health/health_config.c \ - health/health_json.c \ - health/health_log.c \ - $(NULL) - -IDLEJITTER_PLUGIN_FILES = \ - plugins/idlejitter.plugin/plugin_idlejitter.c \ - plugins/idlejitter.plugin/plugin_idlejitter.h \ - $(NULL) - -CGROUPS_PLUGIN_FILES = \ - plugins/linux-cgroups.plugin/sys_fs_cgroup.c \ - plugins/linux-cgroups.plugin/sys_fs_cgroup.h \ - $(NULL) - -CGROUP_NETWORK_FILES = \ - plugins/linux-cgroups.plugin/cgroup-network.c \ - $(LIBNETDATA_FILES) \ - $(NULL) - -DISKSPACE_PLUGIN_FILES = \ - plugins/linux-diskspace.plugin/plugin_diskspace.h \ - plugins/linux-diskspace.plugin/plugin_diskspace.c \ - $(NULL) - -FREEIPMI_PLUGIN_FILES = \ - plugins/linux-freeipmi.plugin/freeipmi_plugin.c \ - $(LIBNETDATA_FILES) \ - $(NULL) - -NFACCT_PLUGIN_FILES = \ - plugins/linux-nfacct.plugin/plugin_nfacct.c \ - plugins/linux-nfacct.plugin/plugin_nfacct.h \ - $(NULL) - -PROC_PLUGIN_FILES = \ - plugins/linux-proc.plugin/ipc.c \ - plugins/linux-proc.plugin/plugin_proc.c \ - plugins/linux-proc.plugin/plugin_proc.h \ - plugins/linux-proc.plugin/proc_diskstats.c \ - plugins/linux-proc.plugin/proc_interrupts.c \ - plugins/linux-proc.plugin/proc_softirqs.c \ - plugins/linux-proc.plugin/proc_loadavg.c \ - plugins/linux-proc.plugin/proc_meminfo.c \ - plugins/linux-proc.plugin/proc_net_dev.c \ - plugins/linux-proc.plugin/proc_net_ip_vs_stats.c \ - plugins/linux-proc.plugin/proc_net_netstat.c \ - plugins/linux-proc.plugin/proc_net_rpc_nfs.c \ - plugins/linux-proc.plugin/proc_net_rpc_nfsd.c \ - plugins/linux-proc.plugin/proc_net_snmp.c \ - plugins/linux-proc.plugin/proc_net_snmp6.c \ - plugins/linux-proc.plugin/proc_net_sctp_snmp.c \ - plugins/linux-proc.plugin/proc_net_sockstat.c \ - plugins/linux-proc.plugin/proc_net_sockstat6.c \ - plugins/linux-proc.plugin/proc_net_softnet_stat.c \ - plugins/linux-proc.plugin/proc_net_stat_conntrack.c \ - plugins/linux-proc.plugin/proc_net_stat_synproxy.c \ - plugins/linux-proc.plugin/proc_self_mountinfo.c \ - 
plugins/linux-proc.plugin/proc_self_mountinfo.h \ - plugins/linux-proc.plugin/zfs_common.c \ - plugins/linux-proc.plugin/zfs_common.h \ - plugins/linux-proc.plugin/proc_spl_kstat_zfs.c \ - plugins/linux-proc.plugin/proc_stat.c \ - plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c \ - plugins/linux-proc.plugin/proc_vmstat.c \ - plugins/linux-proc.plugin/proc_uptime.c \ - plugins/linux-proc.plugin/sys_kernel_mm_ksm.c \ - plugins/linux-proc.plugin/sys_devices_system_edac_mc.c \ - plugins/linux-proc.plugin/sys_devices_system_node.c \ - plugins/linux-proc.plugin/sys_fs_btrfs.c \ - $(NULL) - -TC_PLUGIN_FILES = \ - plugins/linux-tc.plugin/plugin_tc.c \ - plugins/linux-tc.plugin/plugin_tc.h \ - $(NULL) - -MACOS_PLUGIN_FILES = \ - plugins/macos.plugin/plugin_macos.c \ - plugins/macos.plugin/plugin_macos.h \ - plugins/macos.plugin/macos_sysctl.c \ - plugins/macos.plugin/macos_mach_smi.c \ - plugins/macos.plugin/macos_fw.c \ - $(NULL) - -PLUGINSD_PLUGIN_FILES = \ - plugins/plugins.d.plugin/plugins_d.c \ - plugins/plugins.d.plugin/plugins_d.h \ - $(NULL) - -RRD_PLUGIN_FILES = \ - database/rrdcalc.c \ - database/rrdcalc.h \ - database/rrdcalctemplate.c \ - database/rrdcalctemplate.h \ - database/rrddim.c \ - database/rrddimvar.c \ - database/rrddimvar.h \ - database/rrdfamily.c \ - database/rrdhost.c \ - database/rrd.c \ - database/rrd.h \ - database/rrdset.c \ - database/rrdsetvar.c \ - database/rrdsetvar.h \ - database/rrdvar.c \ - database/rrdvar.h \ - $(NULL) - -API_PLUGIN_FILES = \ - api/rrd2json.c \ - api/rrd2json.h \ - api/web_api_v1.c \ - api/web_api_v1.h \ - api/web_buffer_svg.c \ - api/web_buffer_svg.h \ - $(NULL) - -STREAMING_PLUGIN_FILES = \ - streaming/rrdpush.c \ - streaming/rrdpush.h \ - $(NULL) - -REGISTRY_PLUGIN_FILES = \ - registry/registry.c \ - registry/registry.h \ - registry/registry_db.c \ - registry/registry_init.c \ - registry/registry_internals.c \ - registry/registry_internals.h \ - registry/registry_log.c \ - 
registry/registry_machine.c \ - registry/registry_machine.h \ - registry/registry_person.c \ - registry/registry_person.h \ - registry/registry_url.c \ - registry/registry_url.h \ - $(NULL) - -STATSD_PLUGIN_FILES = \ - plugins/statsd.plugin/statsd.c \ - plugins/statsd.plugin/statsd.h \ - $(NULL) - -WEB_PLGUGIN_FILES = \ - webserver/web_client.c \ - webserver/web_client.h \ - webserver/web_server.c \ - webserver/web_server.h \ - $(NULL) - -BACKENDS_PLUGIN_FILES = \ - backends/backends.c \ - backends/backends.h \ - backends/graphite/graphite.c \ - backends/graphite/graphite.h \ - backends/json/json.c \ - backends/json/json.h \ - backends/opentsdb/opentsdb.c \ - backends/opentsdb/opentsdb.h \ - backends/prometheus/backend_prometheus.c \ - backends/prometheus/backend_prometheus.h \ - $(NULL) - -WEB_PLUGIN_FILES = \ - webserver/web_client.c \ - webserver/web_client.h \ - webserver/web_server.c \ - webserver/web_server.h \ - $(NULL) - -NETDATA_FILES = \ - plugins/all.h \ - common.c \ - common.h \ - daemon.c \ - daemon.h \ - global_statistics.c \ - global_statistics.h \ - main.c \ - main.h \ - signals.c \ - signals.h \ - unit_test.c \ - unit_test.h \ - $(LIBNETDATA_FILES) \ - $(API_PLUGIN_FILES) \ - $(BACKENDS_PLUGIN_FILES) \ - $(CHECKS_PLUGIN_FILES) \ - $(HEALTH_PLUGIN_FILES) \ - $(IDLEJITTER_PLUGIN_FILES) \ - $(PLUGINSD_PLUGIN_FILES) \ - $(REGISTRY_PLUGIN_FILES) \ - $(RRD_PLUGIN_FILES) \ - $(STREAMING_PLUGIN_FILES) \ - $(STATSD_PLUGIN_FILES) \ - $(WEB_PLUGIN_FILES) \ - $(NULL) - -if FREEBSD - NETDATA_FILES += \ - $(FREEBSD_PLUGIN_FILES) \ - $(NULL) -endif - -if MACOS - NETDATA_FILES += \ - $(MACOS_PLUGIN_FILES) \ - $(NULL) -endif - -if LINUX - NETDATA_FILES += \ - $(CGROUPS_PLUGIN_FILES) \ - $(DISKSPACE_PLUGIN_FILES) \ - $(NFACCT_PLUGIN_FILES) \ - $(PROC_PLUGIN_FILES) \ - $(TC_PLUGIN_FILES) \ - $(NULL) - -endif - -NETDATA_COMMON_LIBS = \ - $(OPTIONAL_MATH_LIBS) \ - $(OPTIONAL_ZLIB_LIBS) \ - $(OPTIONAL_UUID_LIBS) \ - $(NULL) - - -sbin_PROGRAMS += netdata -netdata_SOURCES 
= ../config.h $(NETDATA_FILES) -netdata_LDADD = \ - $(NETDATA_COMMON_LIBS) \ - $(OPTIONAL_NFACCT_LIBS) \ - $(NULL) - -if ENABLE_PLUGIN_APPS - plugins_PROGRAMS += apps.plugin - apps_plugin_SOURCES = ../config.h $(APPS_PLUGIN_FILES) - apps_plugin_LDADD = \ - $(NETDATA_COMMON_LIBS) \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(NULL) -endif - -if ENABLE_PLUGIN_CGROUP_NETWORK - plugins_PROGRAMS += cgroup-network - cgroup_network_SOURCES = ../config.h $(CGROUP_NETWORK_FILES) - cgroup_network_LDADD = \ - $(NETDATA_COMMON_LIBS) \ - $(NULL) -endif - -if ENABLE_PLUGIN_FREEIPMI - plugins_PROGRAMS += freeipmi.plugin - freeipmi_plugin_SOURCES = ../config.h $(FREEIPMI_PLUGIN_FILES) - freeipmi_plugin_LDADD = \ - $(NETDATA_COMMON_LIBS) \ - $(OPTIONAL_IPMIMONITORING_LIBS) \ - $(NULL) -endif - diff --git a/src/backends/prometheus/Makefile.am b/src/backends/prometheus/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/backends/prometheus/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/database/Makefile.am b/src/database/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/database/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/health/Makefile.am b/src/health/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/health/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/libnetdata/Makefile.am b/src/libnetdata/Makefile.am deleted file mode 100644 index 13fbf82599..0000000000 --- a/src/libnetdata/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -MAINTAINERCLEANFILES = Makefile.in - - diff --git 
a/src/plugins/Makefile.am b/src/plugins/Makefile.am deleted file mode 100644 index 5bcb2b520f..0000000000 --- a/src/plugins/Makefile.am +++ /dev/null @@ -1,38 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -MAINTAINERCLEANFILES = Makefile.in - -SUBDIRS = \ - apps.plugin \ - checks.plugin \ - idlejitter.plugin \ - plugins.d.plugin \ - statsd.plugin \ - $(NULL) - -if FREEBSD - -SUBDIRS += \ - freebsd.plugin \ - $(NULL) - -else -if MACOS - -SUBDIRS += \ - macos.plugin \ - $(NULL) - -else - -SUBDIRS += \ - linux-cgroups.plugin \ - linux-diskspace.plugin \ - linux-freeipmi.plugin \ - linux-nfacct.plugin \ - linux-proc.plugin \ - linux-tc.plugin \ - $(NULL) - -endif -endif diff --git a/src/plugins/checks.plugin/Makefile.am b/src/plugins/checks.plugin/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/plugins/checks.plugin/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/plugins/freebsd.plugin/Makefile.am b/src/plugins/freebsd.plugin/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/plugins/freebsd.plugin/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/plugins/idlejitter.plugin/Makefile.am b/src/plugins/idlejitter.plugin/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/plugins/idlejitter.plugin/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/plugins/linux-cgroups.plugin/Makefile.am b/src/plugins/linux-cgroups.plugin/Makefile.am deleted file mode 100644 index 14f3826017..0000000000 --- a/src/plugins/linux-cgroups.plugin/Makefile.am +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - 
-AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in - -CLEANFILES = \ - cgroup-name.sh \ - $(NULL) - -cgroup-name.sh: cgroup-name.sh.in - if sed \ - -e 's#[@]configdir_POST@#$(configdir)#g' \ - -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \ - $< > $@.tmp; then \ - mv "$@.tmp" "$@"; \ - else \ - rm -f "$@.tmp"; \ - false; \ - fi - -dist_plugins_SCRIPTS = \ - cgroup-name.sh \ - cgroup-network-helper.sh \ - $(NULL) - -dist_noinst_DATA = \ - cgroup-name.sh.in \ - $(NULL) diff --git a/src/plugins/linux-diskspace.plugin/Makefile.am b/src/plugins/linux-diskspace.plugin/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/plugins/linux-diskspace.plugin/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/plugins/linux-freeipmi.plugin/Makefile.am b/src/plugins/linux-freeipmi.plugin/Makefile.am deleted file mode 100644 index 20504a2c6e..0000000000 --- a/src/plugins/linux-freeipmi.plugin/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in - diff --git a/src/plugins/linux-nfacct.plugin/Makefile.am b/src/plugins/linux-nfacct.plugin/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/plugins/linux-nfacct.plugin/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/plugins/linux-proc.plugin/Makefile.am b/src/plugins/linux-proc.plugin/Makefile.am deleted file mode 100644 index 20504a2c6e..0000000000 --- a/src/plugins/linux-proc.plugin/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in - diff --git a/src/plugins/linux-tc.plugin/Makefile.am 
b/src/plugins/linux-tc.plugin/Makefile.am deleted file mode 100644 index 20504a2c6e..0000000000 --- a/src/plugins/linux-tc.plugin/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in - diff --git a/src/plugins/macos.plugin/Makefile.am b/src/plugins/macos.plugin/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/plugins/macos.plugin/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/plugins/plugins.d.plugin/Makefile.am b/src/plugins/plugins.d.plugin/Makefile.am deleted file mode 100644 index 20504a2c6e..0000000000 --- a/src/plugins/plugins.d.plugin/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in - diff --git a/src/plugins/statsd.plugin/Makefile.am b/src/plugins/statsd.plugin/Makefile.am deleted file mode 100644 index 20504a2c6e..0000000000 --- a/src/plugins/statsd.plugin/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in - diff --git a/src/registry/Makefile.am b/src/registry/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/registry/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/streaming/Makefile.am b/src/streaming/Makefile.am deleted file mode 100644 index 8773fd098c..0000000000 --- a/src/streaming/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/webserver/Makefile.am b/src/webserver/Makefile.am deleted file mode 
100644 index 8773fd098c..0000000000 --- a/src/webserver/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later - -AUTOMAKE_OPTIONS = subdir-objects -MAINTAINERCLEANFILES = Makefile.in diff --git a/src/webserver/web_server.c b/src/webserver/web_server.c deleted file mode 100644 index 2bd3721874..0000000000 --- a/src/webserver/web_server.c +++ /dev/null @@ -1,1298 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#include "web_server.h" - -// this file includes 3 web servers: -// -// 1. single-threaded, based on select() -// 2. multi-threaded, based on poll() that spawns threads to handle the requests, based on select() -// 3. static-threaded, based on poll() using a fixed number of threads (configured at netdata.conf) - -WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; - -// -------------------------------------------------------------------------------------- - -WEB_SERVER_MODE web_server_mode_id(const char *mode) { - if(!strcmp(mode, "none")) - return WEB_SERVER_MODE_NONE; - else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded")) - return WEB_SERVER_MODE_SINGLE_THREADED; - else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded")) - return WEB_SERVER_MODE_STATIC_THREADED; - else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded")) - return WEB_SERVER_MODE_MULTI_THREADED; -} - -const char *web_server_mode_name(WEB_SERVER_MODE id) { - switch(id) { - case WEB_SERVER_MODE_NONE: - return "none"; - - case WEB_SERVER_MODE_SINGLE_THREADED: - return "single-threaded"; - - case WEB_SERVER_MODE_STATIC_THREADED: - return "static-threaded"; - - default: - case WEB_SERVER_MODE_MULTI_THREADED: - return "multi-threaded"; - } -} - -// -------------------------------------------------------------------------------------- -// API sockets - -static LISTEN_SOCKETS api_sockets = { - .config_section = CONFIG_SECTION_WEB, - .default_bind_to = "*", - .default_port = API_LISTEN_PORT, - .backlog = 
API_LISTEN_BACKLOG -}; - -int api_listen_sockets_setup(void) { - int socks = listen_sockets_setup(&api_sockets); - - if(!socks) - fatal("LISTENER: Cannot listen on any API socket. Exiting..."); - - return socks; -} - - -// -------------------------------------------------------------------------------------- -// access lists - -SIMPLE_PATTERN *web_allow_connections_from = NULL; -SIMPLE_PATTERN *web_allow_streaming_from = NULL; -SIMPLE_PATTERN *web_allow_netdataconf_from = NULL; - -// WEB_CLIENT_ACL -SIMPLE_PATTERN *web_allow_dashboard_from = NULL; -SIMPLE_PATTERN *web_allow_registry_from = NULL; -SIMPLE_PATTERN *web_allow_badges_from = NULL; - -static void web_client_update_acl_matches(struct web_client *w) { - w->acl = WEB_CLIENT_ACL_NONE; - - if(!web_allow_dashboard_from || simple_pattern_matches(web_allow_dashboard_from, w->client_ip)) - w->acl |= WEB_CLIENT_ACL_DASHBOARD; - - if(!web_allow_registry_from || simple_pattern_matches(web_allow_registry_from, w->client_ip)) - w->acl |= WEB_CLIENT_ACL_REGISTRY; - - if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip)) - w->acl |= WEB_CLIENT_ACL_BADGE; -} - - -// -------------------------------------------------------------------------------------- - -static void log_connection(struct web_client *w, const char *msg) { - log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg); -} - -// ---------------------------------------------------------------------------- -// allocate and free web_clients - -static void web_client_zero(struct web_client *w) { - // zero everything about it - but keep the buffers - - // remember the pointers to the buffers - BUFFER *b1 = w->response.data; - BUFFER *b2 = w->response.header; - BUFFER *b3 = w->response.header_output; - - // empty the buffers - buffer_flush(b1); - buffer_flush(b2); - buffer_flush(b3); - - freez(w->user_agent); - - // zero everything - memset(w, 0, sizeof(struct web_client)); - - // restore the 
pointers of the buffers - w->response.data = b1; - w->response.header = b2; - w->response.header_output = b3; -} - -static void web_client_free(struct web_client *w) { - buffer_free(w->response.header_output); - buffer_free(w->response.header); - buffer_free(w->response.data); - freez(w->user_agent); - freez(w); -} - -static struct web_client *web_client_alloc(void) { - struct web_client *w = callocz(1, sizeof(struct web_client)); - w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); - w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE); - w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE); - return w; -} - -// ---------------------------------------------------------------------------- -// web clients caching - -// When clients connect and disconnect, avoid allocating and releasing memory. -// Instead, when new clients get connected, reuse any memory previously allocated -// for serving web clients that are now disconnected. - -// The size of the cache is adaptive. It caches the structures of 2x -// the number of currently connected clients. 
- -// Comments per server: -// SINGLE-THREADED : 1 cache is maintained -// MULTI-THREADED : 1 cache is maintained -// STATIC-THREADED : 1 cache for each thred of the web server - -struct clients_cache { - pid_t pid; - - struct web_client *used; // the structures of the currently connected clients - size_t used_count; // the count the currently connected clients - - struct web_client *avail; // the cached structures, available for future clients - size_t avail_count; // the number of cached structures - - size_t reused; // the number of re-uses - size_t allocated; // the number of allocations -}; - -static __thread struct clients_cache web_clients_cache = { - .pid = 0, - .used = NULL, - .used_count = 0, - .avail = NULL, - .avail_count = 0, - .allocated = 0, - .reused = 0 -}; - -static inline void web_client_cache_verify(int force) { -#ifdef NETDATA_INTERNAL_CHECKS - static __thread size_t count = 0; - count++; - - if(unlikely(force || count > 1000)) { - count = 0; - - struct web_client *w; - size_t used = 0, avail = 0; - for(w = web_clients_cache.used; w ; w = w->next) used++; - for(w = web_clients_cache.avail; w ; w = w->next) avail++; - - info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)." - , used, web_clients_cache.used_count - , avail, web_clients_cache.avail_count - , web_clients_cache.allocated - , web_clients_cache.reused - , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0 - ); - } -#else - if(unlikely(force)) { - info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)." 
- , web_clients_cache.used_count - , web_clients_cache.avail_count - , web_clients_cache.allocated - , web_clients_cache.reused - , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0 - ); - } -#endif -} - -// destroy the cache and free all the memory it uses -static void web_client_cache_destroy(void) { -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) - error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); - - web_client_cache_verify(1); -#endif - - netdata_thread_disable_cancelability(); - - struct web_client *w, *t; - - w = web_clients_cache.used; - while(w) { - t = w; - w = w->next; - web_client_free(t); - } - web_clients_cache.used = NULL; - web_clients_cache.used_count = 0; - - w = web_clients_cache.avail; - while(w) { - t = w; - w = w->next; - web_client_free(t); - } - web_clients_cache.avail = NULL; - web_clients_cache.avail_count = 0; - - netdata_thread_enable_cancelability(); -} - -static struct web_client *web_client_get_from_cache_or_allocate() { - -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(web_clients_cache.pid == 0)) - web_clients_cache.pid = gettid(); - - if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) - error("Oops! wrong thread accessing the cache. 
Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); -#endif - - netdata_thread_disable_cancelability(); - - struct web_client *w = web_clients_cache.avail; - - if(w) { - // get it from avail - if (w == web_clients_cache.avail) web_clients_cache.avail = w->next; - if(w->prev) w->prev->next = w->next; - if(w->next) w->next->prev = w->prev; - web_clients_cache.avail_count--; - web_client_zero(w); - web_clients_cache.reused++; - } - else { - // allocate it - w = web_client_alloc(); - web_clients_cache.allocated++; - } - - // link it to used web clients - if (web_clients_cache.used) web_clients_cache.used->prev = w; - w->next = web_clients_cache.used; - w->prev = NULL; - web_clients_cache.used = w; - web_clients_cache.used_count++; - - // initialize it - w->id = web_client_connected(); - w->mode = WEB_CLIENT_MODE_NORMAL; - - netdata_thread_enable_cancelability(); - - return w; -} - -static void web_client_release(struct web_client *w) { -#ifdef NETDATA_INTERNAL_CHECKS - if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) - error("Oops! wrong thread accessing the cache. 
Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); - - if(unlikely(w->running)) - error("%llu: releasing web client from %s port %s, but it still running.", w->id, w->client_ip, w->client_port); -#endif - - debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port); - - log_connection(w, "DISCONNECTED"); - web_client_request_done(w); - web_client_disconnected(); - - netdata_thread_disable_cancelability(); - - if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) { - if (w->ifd != -1) close(w->ifd); - if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd); - w->ifd = w->ofd = -1; - } - - // unlink it from the used - if (w == web_clients_cache.used) web_clients_cache.used = w->next; - if(w->prev) w->prev->next = w->next; - if(w->next) w->next->prev = w->prev; - web_clients_cache.used_count--; - - if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) { - // we have too many of them - free it - web_client_free(w); - } - else { - // link it to the avail - if (web_clients_cache.avail) web_clients_cache.avail->prev = w; - w->next = web_clients_cache.avail; - w->prev = NULL; - web_clients_cache.avail = w; - web_clients_cache.avail_count++; - } - - netdata_thread_enable_cancelability(); -} - - -// ---------------------------------------------------------------------------- -// high level web clients connection management - -static void web_client_initialize_connection(struct web_client *w) { - int flag = 1; - - if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0)) - debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd); - - flag = 1; - if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0)) - debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd); - - web_client_update_acl_matches(w); - - w->origin[0] = '*'; w->origin[1] = 
'\0'; - w->cookie1[0] = '\0'; w->cookie2[0] = '\0'; - freez(w->user_agent); w->user_agent = NULL; - - web_client_enable_wait_receive(w); - - log_connection(w, "CONNECTED"); - - web_client_cache_verify(0); -} - -static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) { - struct web_client *w; - - w = web_client_get_from_cache_or_allocate(); - w->ifd = w->ofd = fd; - - strncpyz(w->client_ip, client_ip, sizeof(w->client_ip) - 1); - strncpyz(w->client_port, client_port, sizeof(w->client_port) - 1); - - if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); - if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); - - web_client_initialize_connection(w); - return(w); -} - -static struct web_client *web_client_create_on_listenfd(int listener) { - struct web_client *w; - - w = web_client_get_from_cache_or_allocate(); - w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from); - - if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); - if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); - - if (w->ifd == -1) { - if(errno == EPERM) - log_connection(w, "ACCESS DENIED"); - else { - log_connection(w, "CONNECTION FAILED"); - error("%llu: Failed to accept new incoming connection.", w->id); - } - - web_client_release(w); - return NULL; - } - - web_client_initialize_connection(w); - return(w); -} - - -// -------------------------------------------------------------------------------------- -// the thread of a single client - for the MULTI-THREADED web server - -// 1. waits for input and output, using async I/O -// 2. it processes HTTP requests -// 3. it generates HTTP responses -// 4. 
it copies data from input to output if mode is FILECOPY - -int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS; -int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST; -long web_client_streaming_rate_t = 0L; - -static void multi_threaded_web_client_worker_main_cleanup(void *ptr) { - struct web_client *w = ptr; - WEB_CLIENT_IS_DEAD(w); - w->running = 0; -} - -static void *multi_threaded_web_client_worker_main(void *ptr) { - netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr); - - struct web_client *w = ptr; - w->running = 1; - - struct pollfd fds[2], *ifd, *ofd; - int retval, timeout_ms; - nfds_t fdmax = 0; - - while(!netdata_exit) { - if(unlikely(web_client_check_dead(w))) { - debug(D_WEB_CLIENT, "%llu: client is dead.", w->id); - break; - } - else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) { - debug(D_WEB_CLIENT, "%llu: client is not set for neither receiving nor sending data.", w->id); - break; - } - - if(unlikely(w->ifd < 0 || w->ofd < 0)) { - error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd", w->id, w->ifd, w->ofd); - break; - } - - if(w->ifd == w->ofd) { - fds[0].fd = w->ifd; - fds[0].events = 0; - fds[0].revents = 0; - - if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; - if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT; - - fds[1].fd = -1; - fds[1].events = 0; - fds[1].revents = 0; - - ifd = ofd = &fds[0]; - - fdmax = 1; - } - else { - fds[0].fd = w->ifd; - fds[0].events = 0; - fds[0].revents = 0; - if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; - ifd = &fds[0]; - - fds[1].fd = w->ofd; - fds[1].events = 0; - fds[1].revents = 0; - if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT; - ofd = &fds[1]; - - fdmax = 2; - } - - debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); - errno = 
0; - timeout_ms = web_client_timeout * 1000; - retval = poll(fds, fdmax, timeout_ms); - - if(unlikely(netdata_exit)) break; - - if(unlikely(retval == -1)) { - if(errno == EAGAIN || errno == EINTR) { - debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id); - continue; - } - - debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = %d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd); - break; - } - else if(unlikely(!retval)) { - debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); - break; - } - - if(unlikely(netdata_exit)) break; - - int used = 0; - if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) { - used++; - if(web_client_send(w) < 0) { - debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); - break; - } - } - - if(unlikely(netdata_exit)) break; - - if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) { - used++; - if(web_client_receive(w) < 0) { - debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. 
Closing client.", w->id); - break; - } - - if(w->mode == WEB_CLIENT_MODE_NORMAL) { - debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id); - web_client_process_request(w); - - // if the sockets are closed, may have transferred this client - // to plugins.d - if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM)) - break; - } - } - - if(unlikely(!used)) { - debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id); - break; - } - } - - if(w->mode != WEB_CLIENT_MODE_STREAM) - log_connection(w, "DISCONNECTED"); - - web_client_request_done(w); - - debug(D_WEB_CLIENT, "%llu: done...", w->id); - - // close the sockets/files now - // to free file descriptors - if(w->ifd == w->ofd) { - if(w->ifd != -1) close(w->ifd); - } - else { - if(w->ifd != -1) close(w->ifd); - if(w->ofd != -1) close(w->ofd); - } - w->ifd = -1; - w->ofd = -1; - - netdata_thread_cleanup_pop(1); - return NULL; -} - -// -------------------------------------------------------------------------------------- -// the main socket listener - MULTI-THREADED - -// 1. it accepts new incoming requests on our port -// 2. creates a new web_client for each connection received -// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients) -// 4. 
cleans up old web_clients that their netdata_threads have been exited - -static void web_client_multi_threaded_web_server_release_clients(void) { - struct web_client *w; - for(w = web_clients_cache.used; w ; ) { - if(unlikely(!w->running && web_client_check_dead(w))) { - struct web_client *t = w->next; - web_client_release(w); - w = t; - } - else - w = w->next; - } -} - -static void web_client_multi_threaded_web_server_stop_all_threads(void) { - struct web_client *w; - - int found = 1; - usec_t max = 2 * USEC_PER_SEC, step = 50000; - for(w = web_clients_cache.used; w ; w = w->next) { - if(w->running) { - found++; - info("stopping web client %s, id %llu", w->client_ip, w->id); - netdata_thread_cancel(w->thread); - } - } - - while(found && max > 0) { - max -= step; - info("Waiting %d web threads to finish...", found); - sleep_usec(step); - found = 0; - for(w = web_clients_cache.used; w ; w = w->next) - if(w->running) found++; - } - - if(found) - error("%d web threads are taking too long to finish. 
Giving up.", found); -} - -static struct pollfd *socket_listen_main_multi_threaded_fds = NULL; - -static void socket_listen_main_multi_threaded_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("cleaning up..."); - - info("releasing allocated memory..."); - freez(socket_listen_main_multi_threaded_fds); - - info("closing all sockets..."); - listen_sockets_close(&api_sockets); - - info("stopping all running web server threads..."); - web_client_multi_threaded_web_server_stop_all_threads(); - - info("freeing web clients cache..."); - web_client_cache_destroy(); - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -#define CLEANUP_EVERY_EVENTS 60 -void *socket_listen_main_multi_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr); - - web_server_mode = WEB_SERVER_MODE_MULTI_THREADED; - web_server_is_multithreaded = 1; - - struct web_client *w; - int retval, counter = 0; - - if(!api_sockets.opened) - fatal("LISTENER: No sockets to listen to."); - - socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened); - - size_t i; - for(i = 0; i < api_sockets.opened ;i++) { - socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i]; - socket_listen_main_multi_threaded_fds[i].events = POLLIN; - socket_listen_main_multi_threaded_fds[i].revents = 0; - - info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); - } - - int timeout_ms = 1 * 1000; - - while(!netdata_exit) { - - // debug(D_WEB_CLIENT, "LISTENER: Waiting..."); - retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms); - - if(unlikely(retval == -1)) { - error("LISTENER: poll() failed."); - continue; - } - else if(unlikely(!retval)) { - debug(D_WEB_CLIENT, "LISTENER: poll() timeout."); - counter++; - continue; - } - - 
for(i = 0 ; i < api_sockets.opened ; i++) { - short int revents = socket_listen_main_multi_threaded_fds[i].revents; - - // check for new incoming connections - if(revents & POLLIN || revents & POLLPRI) { - socket_listen_main_multi_threaded_fds[i].revents = 0; - - w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd); - if(unlikely(!w)) { - // no need for error log - web_client_create_on_listenfd already logged the error - continue; - } - - if(api_sockets.fds_families[i] == AF_UNIX) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - char tag[NETDATA_THREAD_TAG_MAX + 1]; - snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port); - - w->running = 1; - if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) { - w->running = 0; - web_client_release(w); - } - } - } - - counter++; - if(counter > CLEANUP_EVERY_EVENTS) { - counter = 0; - web_client_multi_threaded_web_server_release_clients(); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// -------------------------------------------------------------------------------------- -// the main socket listener - SINGLE-THREADED - -struct web_client *single_threaded_clients[FD_SETSIZE]; - -static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) { - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { - return 1; - } - - if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) { - error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d)", w->id, w->ifd, w->ofd, (int)FD_SETSIZE); - return 1; - } - - FD_SET(w->ifd, efds); - if(unlikely(*max < w->ifd)) *max = w->ifd; - - if(unlikely(w->ifd != w->ofd)) { - if(*max < w->ofd) *max = w->ofd; - FD_SET(w->ofd, efds); - } - - 
if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds); - if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds); - - single_threaded_clients[w->ifd] = w; - single_threaded_clients[w->ofd] = w; - - return 0; -} - -static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) { - FD_CLR(w->ifd, efds); - if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds); - - if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds); - if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds); - - single_threaded_clients[w->ifd] = NULL; - single_threaded_clients[w->ofd] = NULL; - - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { - return 1; - } - - return 0; -} - -static void socket_listen_main_single_threaded_cleanup(void *data) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - info("closing all sockets..."); - listen_sockets_close(&api_sockets); - - info("freeing web clients cache..."); - web_client_cache_destroy(); - - info("cleanup completed."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *socket_listen_main_single_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr); - web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED; - web_server_is_multithreaded = 0; - - struct web_client *w; - - if(!api_sockets.opened) - fatal("LISTENER: no listen sockets available."); - - size_t i; - for(i = 0; i < (size_t)FD_SETSIZE ; i++) - single_threaded_clients[i] = NULL; - - fd_set ifds, ofds, efds, rifds, rofds, refds; - FD_ZERO (&ifds); - FD_ZERO (&ofds); - FD_ZERO (&efds); - int fdmax = 0; - - for(i = 0; i < api_sockets.opened ; i++) { - if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE) - fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]); - - info("Listening on '%s'", 
(api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); - - FD_SET(api_sockets.fds[i], &ifds); - FD_SET(api_sockets.fds[i], &efds); - if(fdmax < api_sockets.fds[i]) - fdmax = api_sockets.fds[i]; - } - - while(!netdata_exit) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", fdmax); - - struct timeval tv = { .tv_sec = 1, .tv_usec = 0 }; - rifds = ifds; - rofds = ofds; - refds = efds; - int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv); - - if(unlikely(retval == -1)) { - error("LISTENER: select() failed."); - continue; - } - else if(likely(retval)) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something."); - - for(i = 0; i < api_sockets.opened ; i++) { - if (FD_ISSET(api_sockets.fds[i], &rifds)) { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection."); - w = web_client_create_on_listenfd(api_sockets.fds[i]); - if(unlikely(!w)) - continue; - - if(api_sockets.fds_families[i] == AF_UNIX) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - if (single_threaded_link_client(w, &ifds, &ofds, &ifds, &fdmax) != 0) { - web_client_release(w); - } - } - } - - for(i = 0 ; i <= (size_t)fdmax ; i++) { - if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds))) - continue; - - w = single_threaded_clients[i]; - if(unlikely(!w)) { - // error("no client on slot %zu", i); - continue; - } - - if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) { - // error("failed to unlink client %zu", i); - web_client_release(w); - continue; - } - - if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) { - // error("no input on client %zu", i); - web_client_release(w); - continue; - } - - if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) { - if (unlikely(web_client_receive(w) < 0)) { - // error("cannot read from client %zu", i); - web_client_release(w); - continue; - } - - if (w->mode != WEB_CLIENT_MODE_FILECOPY) { - debug(D_WEB_CLIENT, "%llu: 
Processing received data.", w->id); - web_client_process_request(w); - } - } - - if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) { - if (unlikely(web_client_send(w) < 0)) { - // error("cannot send data to client %zu", i); - debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); - web_client_release(w); - continue; - } - } - - if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) { - // error("failed to link client %zu", i); - web_client_release(w); - } - } - } - else { - debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout."); - } - } - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// -------------------------------------------------------------------------------------- -// the main socket listener - STATIC-THREADED - -struct web_server_static_threaded_worker { - netdata_thread_t thread; - - int id; - int running; - - size_t max_sockets; - - volatile size_t connected; - volatile size_t disconnected; - volatile size_t receptions; - volatile size_t sends; - volatile size_t max_concurrent; - - volatile size_t files_read; - volatile size_t file_reads; -}; - -static long long static_threaded_workers_count = 1; -static struct web_server_static_threaded_worker *static_workers_private_data = NULL; -static __thread struct web_server_static_threaded_worker *worker_private = NULL; - -// ---------------------------------------------------------------------------- - -static inline int web_server_check_client_status(struct web_client *w) { - if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) - return -1; - - return 0; -} - -// ---------------------------------------------------------------------------- -// web server files - -static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) { - struct web_client *w = (struct web_client *)data; - - worker_private->files_read++; - - debug(D_WEB_CLIENT, 
"%llu: ADDED FILE READ ON FD %d", w->id, pi->fd); - *events = POLLIN; - pi->data = w; - return w; -} - -static void web_werver_file_del_callback(POLLINFO *pi) { - struct web_client *w = (struct web_client *)pi->data; - debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd); - - w->pollinfo_filecopy_slot = 0; - - if(unlikely(!w->pollinfo_slot)) { - debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd); - web_client_release(w); - } -} - -static int web_server_file_read_callback(POLLINFO *pi, short int *events) { - struct web_client *w = (struct web_client *)pi->data; - - // if there is no POLLINFO linked to this, it means the client disconnected - // stop the file reading too - if(unlikely(!w->pollinfo_slot)) { - debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd); - return -1; - } - - if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) { - debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd); - return -1; - } - - debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd); - - worker_private->file_reads++; - ssize_t ret = unlikely(web_client_read_file(w)); - - if(likely(web_client_has_wait_send(w))) { - POLLJOB *p = pi->p; // our POLLJOB - POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket - - debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd); - p->fds[wpi->slot].events |= POLLOUT; - } - - if(unlikely(ret <= 0 || w->ifd == w->ofd)) { - debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd); - return -1; - } - - *events = POLLIN; - return 0; -} - -static int web_server_file_write_callback(POLLINFO *pi, short int *events) { - (void)pi; - (void)events; - - error("Writing to web files is not supported!"); - - return -1; -} - -// 
---------------------------------------------------------------------------- -// web server clients - -static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) { - (void)data; - - worker_private->connected++; - - size_t concurrent = worker_private->connected - worker_private->disconnected; - if(unlikely(concurrent > worker_private->max_concurrent)) - worker_private->max_concurrent = concurrent; - - *events = POLLIN; - - debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd); - struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port); - w->pollinfo_slot = pi->slot; - - if(unlikely(pi->socktype == AF_UNIX)) - web_client_set_unix(w); - else - web_client_set_tcp(w); - - debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd); - return w; -} - -// TCP client disconnected -static void web_server_del_callback(POLLINFO *pi) { - worker_private->disconnected++; - - struct web_client *w = (struct web_client *)pi->data; - - w->pollinfo_slot = 0; - if(unlikely(w->pollinfo_filecopy_slot)) { - POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket - debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FRED BY READING FILE JOB ON FD %d", w->id, fpi->fd); - } - else { - if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET)) - pi->flags |= POLLINFO_FLAG_DONT_CLOSE; - - debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd); - web_client_release(w); - } -} - -static int web_server_rcv_callback(POLLINFO *pi, short int *events) { - worker_private->receptions++; - - struct web_client *w = (struct web_client *)pi->data; - int fd = pi->fd; - - if(unlikely(web_client_receive(w) < 0)) - return -1; - - debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd); - web_client_process_request(w); - - if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) { - if(w->pollinfo_filecopy_slot == 0) { - debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD 
%d", w->id, pi->fd); - - if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) { - // add a new socket to poll_events, with the same - debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd); - - POLLINFO *fpi = poll_add_fd( - pi->p - , w->ifd - , 0 - , POLLINFO_FLAG_CLIENT_SOCKET - , "FILENAME" - , "" - , web_server_file_add_callback - , web_werver_file_del_callback - , web_server_file_read_callback - , web_server_file_write_callback - , (void *) w - ); - - if(fpi) - w->pollinfo_filecopy_slot = fpi->slot; - else { - error("Failed to add filecopy fd. Closing client."); - return -1; - } - } - } - } - else { - if(unlikely(w->ifd == fd && web_client_has_wait_receive(w))) - *events |= POLLIN; - } - - if(unlikely(w->ofd == fd && web_client_has_wait_send(w))) - *events |= POLLOUT; - - return web_server_check_client_status(w); -} - -static int web_server_snd_callback(POLLINFO *pi, short int *events) { - worker_private->sends++; - - struct web_client *w = (struct web_client *)pi->data; - int fd = pi->fd; - - debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd); - - if(unlikely(web_client_send(w) < 0)) - return -1; - - if(unlikely(w->ifd == fd && web_client_has_wait_receive(w))) - *events |= POLLIN; - - if(unlikely(w->ofd == fd && web_client_has_wait_send(w))) - *events |= POLLOUT; - - return web_server_check_client_status(w); -} - -static void web_server_tmr_callback(void *timer_data) { - worker_private = (struct web_server_static_threaded_worker *)timer_data; - - static __thread RRDSET *st = NULL; - static __thread RRDDIM *rd_user = NULL, *rd_system = NULL; - - if(unlikely(!st)) { - char id[100 + 1]; - char title[100 + 1]; - - snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1); - snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1); - - st = rrdset_create_localhost( - "netdata" - , id - , NULL - , "web" - , "netdata.web_cpu" - , title - , "milliseconds/s" - , "web" - , "stats" - , 
132000 + worker_private->id - , default_rrd_update_every - , RRDSET_TYPE_STACKED - ); - - rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - } - else - rrdset_next(st); - - struct rusage rusage; - getrusage(RUSAGE_THREAD, &rusage); - rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec); - rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec); - rrdset_done(st); -} - -// ---------------------------------------------------------------------------- -// web server worker thread - -static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) { - worker_private = (struct web_server_static_threaded_worker *)ptr; - - info("freeing local web clients cache..."); - web_client_cache_destroy(); - - info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends", - worker_private->connected, - worker_private->disconnected, - worker_private->max_concurrent, - worker_private->receptions, - worker_private->sends - ); - - worker_private->running = 0; -} - -void *socket_listen_main_static_threaded_worker(void *ptr) { - worker_private = (struct web_server_static_threaded_worker *)ptr; - worker_private->running = 1; - - netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr); - - poll_events(&api_sockets - , web_server_add_callback - , web_server_del_callback - , web_server_rcv_callback - , web_server_snd_callback - , web_server_tmr_callback - , web_allow_connections_from - , NULL - , web_client_first_request_timeout - , web_client_timeout - , default_rrd_update_every * 1000 // timer_milliseconds - , ptr // timer_data - , worker_private->max_sockets - ); - - netdata_thread_cleanup_pop(1); - return NULL; -} - - -// ---------------------------------------------------------------------------- -// web server main 
thread - also becomes a worker - -static void socket_listen_main_static_threaded_cleanup(void *ptr) { - struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; - static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; - - int i, found = 0; - usec_t max = 2 * USEC_PER_SEC, step = 50000; - - // we start from 1, - 0 is self - for(i = 1; i < static_threaded_workers_count; i++) { - if(static_workers_private_data[i].running) { - found++; - info("stopping worker %d", i + 1); - netdata_thread_cancel(static_workers_private_data[i].thread); - } - else - info("found stopped worker %d", i + 1); - } - - while(found && max > 0) { - max -= step; - info("Waiting %d static web threads to finish...", found); - sleep_usec(step); - found = 0; - - // we start from 1, - 0 is self - for(i = 1; i < static_threaded_workers_count; i++) { - if (static_workers_private_data[i].running) - found++; - } - } - - if(found) - error("%d static web threads are taking too long to finish. Giving up.", found); - - info("closing all web server sockets..."); - listen_sockets_close(&api_sockets); - - info("all static web threads stopped."); - static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; -} - -void *socket_listen_main_static_threaded(void *ptr) { - netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr); - web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; - - if(!api_sockets.opened) - fatal("LISTENER: no listen sockets available."); - - // 6 threads is the optimal value - // since 6 are the parallel connections browsers will do - // so, if the machine has more CPUs, avoid using resources unnecessarily - int def_thread_count = (processors > 6)?6:processors; - - static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count); - if(static_threaded_workers_count < 1) static_threaded_workers_count = 1; - - size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long 
int)(rlimit_nofile.rlim_cur / 2)); - - static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker)); - - web_server_is_multithreaded = (static_threaded_workers_count > 1); - - int i; - for(i = 1; i < static_threaded_workers_count; i++) { - static_workers_private_data[i].id = i; - static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count; - - char tag[50 + 1]; - snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1); - - info("starting worker %d", i+1); - netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]); - } - - // and the main one - static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count; - socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]); - - netdata_thread_cleanup_pop(1); - return NULL; -} diff --git a/streaming/Makefile.am b/streaming/Makefile.am new file mode 100644 index 0000000000..84048948b4 --- /dev/null +++ b/streaming/Makefile.am @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_libconfig_DATA = \ + stream.conf \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/streaming/README.md b/streaming/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/streaming/rrdpush.c b/streaming/rrdpush.c similarity index 100% rename from src/streaming/rrdpush.c rename to streaming/rrdpush.c diff --git a/src/streaming/rrdpush.h b/streaming/rrdpush.h similarity index 92% rename from src/streaming/rrdpush.h rename to streaming/rrdpush.h index 6fc272b916..7bf3db93a9 100644 --- a/src/streaming/rrdpush.h +++ b/streaming/rrdpush.h @@ -3,8 +3,8 @@ #ifndef NETDATA_RRDPUSH_H #define NETDATA_RRDPUSH_H 1 -#include "../webserver/web_client.h" -#include 
"../common.h" +#include "web/server/web_client.h" +#include "daemon/common.h" extern unsigned int default_rrdpush_enabled; extern char *default_rrdpush_destination; diff --git a/conf.d/stream.conf b/streaming/stream.conf similarity index 100% rename from conf.d/stream.conf rename to streaming/stream.conf diff --git a/system/Makefile.am b/system/Makefile.am index 9300583b85..eca8c565b9 100644 --- a/system/Makefile.am +++ b/system/Makefile.am @@ -4,6 +4,7 @@ # MAINTAINERCLEANFILES= $(srcdir)/Makefile.in CLEANFILES = \ + edit-config \ netdata-openrc \ netdata.logrotate \ netdata.service \ @@ -14,10 +15,14 @@ CLEANFILES = \ $(NULL) include $(top_srcdir)/build/subst.inc - SUFFIXES = .in +dist_config_SCRIPTS = \ + edit-config \ + $(NULL) + nodist_noinst_DATA = \ + edit-config.in \ netdata-openrc \ netdata.logrotate \ netdata.service \ diff --git a/conf.d/edit-config.in b/system/edit-config.in similarity index 100% rename from conf.d/edit-config.in rename to system/edit-config.in diff --git a/tests/Makefile.am b/tests/Makefile.am index 3a0246d08f..722266d771 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-3.0-or-later -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in dist_noinst_DATA = \ README.md \ diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c index 992c8fd801..c07d1d857b 100644 --- a/tests/profile/benchmark-line-parsing.c +++ b/tests/profile/benchmark-line-parsing.c @@ -383,9 +383,15 @@ struct base { }; static inline void callback(void *data1, void *data2) { - char *string = data1; - unsigned long long *value = data2; - *value = fast_strtoull(string); + char *string = data1; + unsigned long long *value = data2; + *value = fast_strtoull(string); +} + +static inline void callback_system_strtoull(void *data1, void *data2) { + char *string = data1; + unsigned long long *value = data2; + *value = strtoull(string, NULL, 10); } @@ 
-415,7 +421,7 @@ static inline struct base *entry(struct base *base, const char *name, void *data static inline int check(struct base *base, const char *s) { uint32_t hash = simple_hash2(s); - if(likely(hash == base->last->hash && !strcmp(s, base->last->name))) { + if(likely(!strcmp(s, base->last->name))) { base->last->found = 1; base->found++; if(base->last->func) base->last->func(base->last->data1, base->last->data2); @@ -514,17 +520,17 @@ void test6() { static struct base *base = NULL; if(unlikely(!base)) { - base = entry(base, "cache", NUMBER1, &values6[0], callback); - base = entry(base, "rss", NUMBER2, &values6[1], callback); - base = entry(base, "rss_huge", NUMBER3, &values6[2], callback); - base = entry(base, "mapped_file", NUMBER4, &values6[3], callback); - base = entry(base, "writeback", NUMBER5, &values6[4], callback); - base = entry(base, "dirty", NUMBER6, &values6[5], callback); - base = entry(base, "swap", NUMBER7, &values6[6], callback); - base = entry(base, "pgpgin", NUMBER8, &values6[7], callback); - base = entry(base, "pgpgout", NUMBER9, &values6[8], callback); - base = entry(base, "pgfault", NUMBER10, &values6[9], callback); - base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback); + base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull); + base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull); + base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull); + base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull); + base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull); + base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull); + base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull); + base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull); + base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull); + base = entry(base, "pgfault", NUMBER10, 
&values6[9], callback_system_strtoull); + base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull); } begin(base); @@ -536,6 +542,33 @@ void test6() { } } +void test7() { + + static struct base *base = NULL; + + if(unlikely(!base)) { + base = entry(base, "cache", NUMBER1, &values6[0], callback); + base = entry(base, "rss", NUMBER2, &values6[1], callback); + base = entry(base, "rss_huge", NUMBER3, &values6[2], callback); + base = entry(base, "mapped_file", NUMBER4, &values6[3], callback); + base = entry(base, "writeback", NUMBER5, &values6[4], callback); + base = entry(base, "dirty", NUMBER6, &values6[5], callback); + base = entry(base, "swap", NUMBER7, &values6[6], callback); + base = entry(base, "pgpgin", NUMBER8, &values6[7], callback); + base = entry(base, "pgpgout", NUMBER9, &values6[8], callback); + base = entry(base, "pgfault", NUMBER10, &values6[9], callback); + base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback); + } + + begin(base); + + int i; + for(i = 0; strings[i] ; i++) { + if(check(base, strings[i])) + break; + } +} + // ---------------------------------------------------------------------------- @@ -615,8 +648,13 @@ void main(void) (void)strcmp("1", "2"); (void)strtoull("123", NULL, 0); - unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0; - unsigned long max = 200000; + unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7; + unsigned long max = 1000000; + + // let the processor get up to speed + begin_clock(); + for(i = 0; i <= max ;i++) test1(); + c1 = end_clock(); begin_clock(); for(i = 0; i <= max ;i++) test1(); @@ -638,26 +676,32 @@ void main(void) for(i = 0; i <= max ;i++) test5(); c5 = end_clock(); - begin_clock(); - for(i = 0; i <= max ;i++) test6(); - c6 = end_clock(); + begin_clock(); + for(i = 0; i <= max ;i++) test6(); + c6 = end_clock(); - for(i = 0; i < 11 ; i++) + begin_clock(); + for(i = 0; i <= max ;i++) test7(); + c7 = end_clock(); + + for(i = 0; i < 11 ; i++) 
printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]); printf("\n\nRESULTS\n"); - printf("test1() in %lu usecs: simple system strcmp().\n" - "test2() in %lu usecs: inline simple_hash() with system strtoull().\n" + printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n" + "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n" "test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n" - "test4() in %lu usecs: inline simple_hash(), if-continue checks.\n" - "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default).\n" - "test6() in %lu usecs: adaptive re-sortable array (wow!)\n" + "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n" + "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n" + "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n" + "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n" , c1 , c2 , c3 , c4 , c5 , c6 + , c7 ); } diff --git a/web/Makefile.am b/web/Makefile.am index 81aa0f81d3..1ec8d586d4 100644 --- a/web/Makefile.am +++ b/web/Makefile.am @@ -1,121 +1,14 @@ -# -# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> # SPDX-License-Identifier: GPL-3.0-or-later -# -MAINTAINERCLEANFILES= $(srcdir)/Makefile.in -dist_web_DATA = \ - demo.html \ - demo2.html \ - demosites.html \ - demosites2.html \ - dashboard.html \ - dashboard.js \ - dashboard_info.js \ - dashboard_info_custom_example.js \ - dashboard.css \ - dashboard.slate.css \ - favicon.ico \ - goto-host-from-alarm.html \ - index.html \ - infographic.html \ - netdata-swagger.yaml \ - netdata-swagger.json \ - robots.txt \ - refresh-badges.js \ - registry.html \ - sitemap.xml \ - tv.html \ - version.txt \ +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = 
$(srcdir)/Makefile.in + +SUBDIRS = \ + api \ + gui \ + server \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ $(NULL) - -weblibdir=$(webdir)/lib -dist_weblib_DATA = \ - lib/bootstrap-3.3.7.min.js \ - lib/bootstrap-slider-10.0.0.min.js \ - lib/bootstrap-table-1.11.0.min.js \ - lib/bootstrap-table-export-1.11.0.min.js \ - lib/bootstrap-toggle-2.2.2.min.js \ - lib/clipboard-polyfill-be05dad.js \ - lib/c3-0.4.18.min.js \ - lib/d3-4.12.2.min.js \ - lib/d3pie-0.2.1-netdata-3.js \ - lib/dygraph-c91c859.min.js \ - lib/dygraph-smooth-plotter-c91c859.js \ - lib/fontawesome-all-5.0.1.min.js \ - lib/gauge-1.3.2.min.js \ - lib/jquery-2.2.4.min.js \ - lib/jquery.easypiechart-97b5824.min.js \ - lib/jquery.peity-3.2.0.min.js \ - lib/jquery.sparkline-2.1.2.min.js \ - lib/lz-string-1.4.4.min.js \ - lib/morris-0.5.1.min.js \ - lib/pako-1.0.6.min.js \ - lib/perfect-scrollbar-0.6.15.min.js \ - lib/raphael-2.2.4-min.js \ - lib/tableExport-1.6.0.min.js \ - $(NULL) - -webcssdir=$(webdir)/css -dist_webcss_DATA = \ - css/morris-0.5.1.css \ - css/bootstrap-3.3.7.css \ - css/bootstrap-theme-3.3.7.min.css \ - css/bootstrap-slate-flat-3.3.7.css \ - css/bootstrap-slider-10.0.0.min.css \ - css/bootstrap-toggle-2.2.2.min.css \ - css/c3-0.4.18.min.css \ - $(NULL) - -webfontsdir=$(webdir)/fonts -dist_webfonts_DATA = \ - fonts/glyphicons-halflings-regular.eot \ - fonts/glyphicons-halflings-regular.svg \ - fonts/glyphicons-halflings-regular.ttf \ - fonts/glyphicons-halflings-regular.woff \ - fonts/glyphicons-halflings-regular.woff2 \ - $(NULL) - -webimagesdir=$(webdir)/images -dist_webimages_DATA = \ - images/alert-128-orange.png \ - images/alert-128-red.png \ - images/alert-multi-size-orange.ico \ - images/alert-multi-size-red.ico \ - images/animated.gif \ - images/check-mark-2-128-green.png \ - images/check-mark-2-multi-size-green.ico \ - images/netdata.svg \ - images/post.png \ - images/seo-performance-16.png \ - images/seo-performance-24.png \ - images/seo-performance-32.png \ - 
images/seo-performance-48.png \ - images/seo-performance-64.png \ - images/seo-performance-72.png \ - images/seo-performance-114.png \ - images/seo-performance-128.png \ - images/seo-performance-256.png \ - images/seo-performance-512.png \ - images/seo-performance-multi-size.ico \ - images/seo-performance-multi-size.icns \ - $(NULL) - - -webwellknowndir=$(webdir)/.well-known -dist_webwellknown_DATA = \ - $(NULL) - -webdntdir=$(webdir)/.well-known/dnt -dist_webdnt_DATA = \ - .well-known/dnt/cookies \ - $(NULL) - -version.txt: - if test -d "$(top_srcdir)/.git"; then \ - git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ - fi > $@.tmp - test -s $@.tmp || echo 0 > $@.tmp - mv $@.tmp $@ - -.PHONY: version.txt diff --git a/web/README.md b/web/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/web/api/Makefile.am b/web/api/Makefile.am new file mode 100644 index 0000000000..19554bed8e --- /dev/null +++ b/web/api/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/api/README.md b/web/api/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/api/rrd2json.c b/web/api/rrd2json.c similarity index 100% rename from src/api/rrd2json.c rename to web/api/rrd2json.c diff --git a/src/api/rrd2json.h b/web/api/rrd2json.h similarity index 100% rename from src/api/rrd2json.h rename to web/api/rrd2json.h diff --git a/src/api/web_api_v1.c b/web/api/web_api_v1.c similarity index 100% rename from src/api/web_api_v1.c rename to web/api/web_api_v1.c diff --git a/src/api/web_api_v1.h b/web/api/web_api_v1.h similarity index 98% rename from src/api/web_api_v1.h rename to web/api/web_api_v1.h index 5f32de5dd4..a8e44459e8 100644 --- a/src/api/web_api_v1.h +++ b/web/api/web_api_v1.h @@ -3,7 +3,7 @@ #ifndef NETDATA_WEB_API_V1_H #define NETDATA_WEB_API_V1_H 1 -#include "../common.h" 
+#include "daemon/common.h" #include "web_buffer_svg.h" #include "rrd2json.h" diff --git a/src/api/web_buffer_svg.c b/web/api/web_buffer_svg.c similarity index 99% rename from src/api/web_buffer_svg.c rename to web/api/web_buffer_svg.c index f5519dbe9f..b54ac0ff07 100644 --- a/src/api/web_buffer_svg.c +++ b/web/api/web_buffer_svg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../common.h" +#include "web_buffer_svg.h" #define BADGE_HORIZONTAL_PADDING 4 #define VERDANA_KERNING 0.2 diff --git a/src/api/web_buffer_svg.h b/web/api/web_buffer_svg.h similarity index 100% rename from src/api/web_buffer_svg.h rename to web/api/web_buffer_svg.h diff --git a/web/.well-known/dnt/cookies b/web/gui/.well-known/dnt/cookies similarity index 100% rename from web/.well-known/dnt/cookies rename to web/gui/.well-known/dnt/cookies diff --git a/web/gui/Makefile.am b/web/gui/Makefile.am new file mode 100644 index 0000000000..d8f86a9af1 --- /dev/null +++ b/web/gui/Makefile.am @@ -0,0 +1,125 @@ +# +# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com> +# SPDX-License-Identifier: GPL-3.0-or-later +# +MAINTAINERCLEANFILES= $(srcdir)/Makefile.in + +dist_noinst_DATA = \ + README.md \ + $(NULL) + +dist_web_DATA = \ + demo.html \ + demo2.html \ + demosites.html \ + demosites2.html \ + dashboard.html \ + dashboard.js \ + dashboard_info.js \ + dashboard_info_custom_example.js \ + dashboard.css \ + dashboard.slate.css \ + favicon.ico \ + goto-host-from-alarm.html \ + index.html \ + infographic.html \ + netdata-swagger.yaml \ + netdata-swagger.json \ + robots.txt \ + refresh-badges.js \ + registry.html \ + sitemap.xml \ + tv.html \ + version.txt \ + $(NULL) + +weblibdir=$(webdir)/lib +dist_weblib_DATA = \ + lib/bootstrap-3.3.7.min.js \ + lib/bootstrap-slider-10.0.0.min.js \ + lib/bootstrap-table-1.11.0.min.js \ + lib/bootstrap-table-export-1.11.0.min.js \ + lib/bootstrap-toggle-2.2.2.min.js \ + lib/clipboard-polyfill-be05dad.js \ + lib/c3-0.4.18.min.js \ + 
lib/d3-4.12.2.min.js \ + lib/d3pie-0.2.1-netdata-3.js \ + lib/dygraph-c91c859.min.js \ + lib/dygraph-smooth-plotter-c91c859.js \ + lib/fontawesome-all-5.0.1.min.js \ + lib/gauge-1.3.2.min.js \ + lib/jquery-2.2.4.min.js \ + lib/jquery.easypiechart-97b5824.min.js \ + lib/jquery.peity-3.2.0.min.js \ + lib/jquery.sparkline-2.1.2.min.js \ + lib/lz-string-1.4.4.min.js \ + lib/morris-0.5.1.min.js \ + lib/pako-1.0.6.min.js \ + lib/perfect-scrollbar-0.6.15.min.js \ + lib/raphael-2.2.4-min.js \ + lib/tableExport-1.6.0.min.js \ + $(NULL) + +webcssdir=$(webdir)/css +dist_webcss_DATA = \ + css/morris-0.5.1.css \ + css/bootstrap-3.3.7.css \ + css/bootstrap-theme-3.3.7.min.css \ + css/bootstrap-slate-flat-3.3.7.css \ + css/bootstrap-slider-10.0.0.min.css \ + css/bootstrap-toggle-2.2.2.min.css \ + css/c3-0.4.18.min.css \ + $(NULL) + +webfontsdir=$(webdir)/fonts +dist_webfonts_DATA = \ + fonts/glyphicons-halflings-regular.eot \ + fonts/glyphicons-halflings-regular.svg \ + fonts/glyphicons-halflings-regular.ttf \ + fonts/glyphicons-halflings-regular.woff \ + fonts/glyphicons-halflings-regular.woff2 \ + $(NULL) + +webimagesdir=$(webdir)/images +dist_webimages_DATA = \ + images/alert-128-orange.png \ + images/alert-128-red.png \ + images/alert-multi-size-orange.ico \ + images/alert-multi-size-red.ico \ + images/animated.gif \ + images/check-mark-2-128-green.png \ + images/check-mark-2-multi-size-green.ico \ + images/netdata.svg \ + images/post.png \ + images/seo-performance-16.png \ + images/seo-performance-24.png \ + images/seo-performance-32.png \ + images/seo-performance-48.png \ + images/seo-performance-64.png \ + images/seo-performance-72.png \ + images/seo-performance-114.png \ + images/seo-performance-128.png \ + images/seo-performance-256.png \ + images/seo-performance-512.png \ + images/seo-performance-multi-size.ico \ + images/seo-performance-multi-size.icns \ + $(NULL) + + +webwellknowndir=$(webdir)/.well-known +dist_webwellknown_DATA = \ + $(NULL) + 
+webdntdir=$(webdir)/.well-known/dnt +dist_webdnt_DATA = \ + .well-known/dnt/cookies \ + $(NULL) + +version.txt: + if test -d "$(top_srcdir)/.git"; then \ + git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \ + fi > $@.tmp + test -s $@.tmp || echo 0 > $@.tmp + mv $@.tmp $@ + +.PHONY: version.txt diff --git a/web/gui/README.md b/web/gui/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/web/css/bootstrap-3.3.7.css b/web/gui/css/bootstrap-3.3.7.css similarity index 100% rename from web/css/bootstrap-3.3.7.css rename to web/gui/css/bootstrap-3.3.7.css diff --git a/web/css/bootstrap-slate-flat-3.3.7.css b/web/gui/css/bootstrap-slate-flat-3.3.7.css similarity index 100% rename from web/css/bootstrap-slate-flat-3.3.7.css rename to web/gui/css/bootstrap-slate-flat-3.3.7.css diff --git a/web/css/bootstrap-slider-10.0.0.min.css b/web/gui/css/bootstrap-slider-10.0.0.min.css similarity index 100% rename from web/css/bootstrap-slider-10.0.0.min.css rename to web/gui/css/bootstrap-slider-10.0.0.min.css diff --git a/web/css/bootstrap-theme-3.3.7.min.css b/web/gui/css/bootstrap-theme-3.3.7.min.css similarity index 100% rename from web/css/bootstrap-theme-3.3.7.min.css rename to web/gui/css/bootstrap-theme-3.3.7.min.css diff --git a/web/css/bootstrap-toggle-2.2.2.min.css b/web/gui/css/bootstrap-toggle-2.2.2.min.css similarity index 100% rename from web/css/bootstrap-toggle-2.2.2.min.css rename to web/gui/css/bootstrap-toggle-2.2.2.min.css diff --git a/web/css/c3-0.4.18.min.css b/web/gui/css/c3-0.4.18.min.css similarity index 100% rename from web/css/c3-0.4.18.min.css rename to web/gui/css/c3-0.4.18.min.css diff --git a/web/css/morris-0.5.1.css b/web/gui/css/morris-0.5.1.css similarity index 100% rename from web/css/morris-0.5.1.css rename to web/gui/css/morris-0.5.1.css diff --git a/web/dashboard.css b/web/gui/dashboard.css similarity index 100% rename from web/dashboard.css rename to web/gui/dashboard.css diff --git a/web/dashboard.html 
b/web/gui/dashboard.html similarity index 100% rename from web/dashboard.html rename to web/gui/dashboard.html diff --git a/web/dashboard.js b/web/gui/dashboard.js similarity index 100% rename from web/dashboard.js rename to web/gui/dashboard.js diff --git a/web/dashboard.slate.css b/web/gui/dashboard.slate.css similarity index 100% rename from web/dashboard.slate.css rename to web/gui/dashboard.slate.css diff --git a/web/dashboard_info.js b/web/gui/dashboard_info.js similarity index 100% rename from web/dashboard_info.js rename to web/gui/dashboard_info.js diff --git a/web/dashboard_info_custom_example.js b/web/gui/dashboard_info_custom_example.js similarity index 100% rename from web/dashboard_info_custom_example.js rename to web/gui/dashboard_info_custom_example.js diff --git a/web/demo.html b/web/gui/demo.html similarity index 100% rename from web/demo.html rename to web/gui/demo.html diff --git a/web/demo2.html b/web/gui/demo2.html similarity index 100% rename from web/demo2.html rename to web/gui/demo2.html diff --git a/web/demosites.html b/web/gui/demosites.html similarity index 99% rename from web/demosites.html rename to web/gui/demosites.html index 34d3a7af8f..ed6fbf43e2 100644 --- a/web/demosites.html +++ b/web/gui/demosites.html @@ -706,9 +706,9 @@ p { and APM metrics via the embedded <b><a href="https://github.com/netdata/netdata/wiki/statsd" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=statsd>statsd server</a></b>. </div> <div class=grid-cell><h3><span class=star>★</span> Out of the box</h3> - <p>netdata supports <a href="https://github.com/netdata/netdata/tree/master/conf.d/python.d" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AutoDetection>auto-detection</a> for everything. 
It collects more than 5000 metrics automatically, with + <p>netdata supports <a href="https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AutoDetection>auto-detection</a> for everything. It collects more than 5000 metrics automatically, with <strong>zero configuration</strong>, it has <strong>zero dependencies</strong>, requires <strong>zero - maintenance</strong> and comes with more than <a href="https://github.com/netdata/netdata/tree/master/conf.d/health.d" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AlarmConfigs>100 alarms</a> pre-configured to detect common + maintenance</strong> and comes with more than <a href="https://github.com/netdata/netdata/tree/master/health/health.d" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AlarmConfigs>100 alarms</a> pre-configured to detect common failures, performance and availability issues. 
</div> <div class=grid-cell><h3><span class=star>★</span> In real-time</h3> diff --git a/web/demosites2.html b/web/gui/demosites2.html similarity index 100% rename from web/demosites2.html rename to web/gui/demosites2.html diff --git a/web/favicon.ico b/web/gui/favicon.ico similarity index 100% rename from web/favicon.ico rename to web/gui/favicon.ico diff --git a/web/fonts/glyphicons-halflings-regular.eot b/web/gui/fonts/glyphicons-halflings-regular.eot similarity index 100% rename from web/fonts/glyphicons-halflings-regular.eot rename to web/gui/fonts/glyphicons-halflings-regular.eot diff --git a/web/fonts/glyphicons-halflings-regular.svg b/web/gui/fonts/glyphicons-halflings-regular.svg similarity index 100% rename from web/fonts/glyphicons-halflings-regular.svg rename to web/gui/fonts/glyphicons-halflings-regular.svg diff --git a/web/fonts/glyphicons-halflings-regular.ttf b/web/gui/fonts/glyphicons-halflings-regular.ttf similarity index 100% rename from web/fonts/glyphicons-halflings-regular.ttf rename to web/gui/fonts/glyphicons-halflings-regular.ttf diff --git a/web/fonts/glyphicons-halflings-regular.woff b/web/gui/fonts/glyphicons-halflings-regular.woff similarity index 100% rename from web/fonts/glyphicons-halflings-regular.woff rename to web/gui/fonts/glyphicons-halflings-regular.woff diff --git a/web/fonts/glyphicons-halflings-regular.woff2 b/web/gui/fonts/glyphicons-halflings-regular.woff2 similarity index 100% rename from web/fonts/glyphicons-halflings-regular.woff2 rename to web/gui/fonts/glyphicons-halflings-regular.woff2 diff --git a/web/goto-host-from-alarm.html b/web/gui/goto-host-from-alarm.html similarity index 100% rename from web/goto-host-from-alarm.html rename to web/gui/goto-host-from-alarm.html diff --git a/web/images/README.md b/web/gui/images/README.md similarity index 100% rename from web/images/README.md rename to web/gui/images/README.md diff --git a/web/images/alert-128-orange.png b/web/gui/images/alert-128-orange.png similarity index 
100% rename from web/images/alert-128-orange.png rename to web/gui/images/alert-128-orange.png diff --git a/web/images/alert-128-red.png b/web/gui/images/alert-128-red.png similarity index 100% rename from web/images/alert-128-red.png rename to web/gui/images/alert-128-red.png diff --git a/web/images/alert-multi-size-orange.ico b/web/gui/images/alert-multi-size-orange.ico similarity index 100% rename from web/images/alert-multi-size-orange.ico rename to web/gui/images/alert-multi-size-orange.ico diff --git a/web/images/alert-multi-size-red.ico b/web/gui/images/alert-multi-size-red.ico similarity index 100% rename from web/images/alert-multi-size-red.ico rename to web/gui/images/alert-multi-size-red.ico diff --git a/web/images/animated.gif b/web/gui/images/animated.gif similarity index 100% rename from web/images/animated.gif rename to web/gui/images/animated.gif diff --git a/web/images/check-mark-2-128-green.png b/web/gui/images/check-mark-2-128-green.png similarity index 100% rename from web/images/check-mark-2-128-green.png rename to web/gui/images/check-mark-2-128-green.png diff --git a/web/images/check-mark-2-multi-size-green.ico b/web/gui/images/check-mark-2-multi-size-green.ico similarity index 100% rename from web/images/check-mark-2-multi-size-green.ico rename to web/gui/images/check-mark-2-multi-size-green.ico diff --git a/web/images/netdata.svg b/web/gui/images/netdata.svg similarity index 100% rename from web/images/netdata.svg rename to web/gui/images/netdata.svg diff --git a/web/images/post.png b/web/gui/images/post.png similarity index 100% rename from web/images/post.png rename to web/gui/images/post.png diff --git a/web/images/seo-performance-114.png b/web/gui/images/seo-performance-114.png similarity index 100% rename from web/images/seo-performance-114.png rename to web/gui/images/seo-performance-114.png diff --git a/web/images/seo-performance-128.png b/web/gui/images/seo-performance-128.png similarity index 100% rename from 
web/images/seo-performance-128.png rename to web/gui/images/seo-performance-128.png diff --git a/web/images/seo-performance-16.png b/web/gui/images/seo-performance-16.png similarity index 100% rename from web/images/seo-performance-16.png rename to web/gui/images/seo-performance-16.png diff --git a/web/images/seo-performance-24.png b/web/gui/images/seo-performance-24.png similarity index 100% rename from web/images/seo-performance-24.png rename to web/gui/images/seo-performance-24.png diff --git a/web/images/seo-performance-256.png b/web/gui/images/seo-performance-256.png similarity index 100% rename from web/images/seo-performance-256.png rename to web/gui/images/seo-performance-256.png diff --git a/web/images/seo-performance-32.png b/web/gui/images/seo-performance-32.png similarity index 100% rename from web/images/seo-performance-32.png rename to web/gui/images/seo-performance-32.png diff --git a/web/images/seo-performance-48.png b/web/gui/images/seo-performance-48.png similarity index 100% rename from web/images/seo-performance-48.png rename to web/gui/images/seo-performance-48.png diff --git a/web/images/seo-performance-512.png b/web/gui/images/seo-performance-512.png similarity index 100% rename from web/images/seo-performance-512.png rename to web/gui/images/seo-performance-512.png diff --git a/web/images/seo-performance-64.png b/web/gui/images/seo-performance-64.png similarity index 100% rename from web/images/seo-performance-64.png rename to web/gui/images/seo-performance-64.png diff --git a/web/images/seo-performance-72.png b/web/gui/images/seo-performance-72.png similarity index 100% rename from web/images/seo-performance-72.png rename to web/gui/images/seo-performance-72.png diff --git a/web/images/seo-performance-multi-size.icns b/web/gui/images/seo-performance-multi-size.icns similarity index 100% rename from web/images/seo-performance-multi-size.icns rename to web/gui/images/seo-performance-multi-size.icns diff --git 
a/web/images/seo-performance-multi-size.ico b/web/gui/images/seo-performance-multi-size.ico similarity index 100% rename from web/images/seo-performance-multi-size.ico rename to web/gui/images/seo-performance-multi-size.ico diff --git a/web/index.html b/web/gui/index.html similarity index 100% rename from web/index.html rename to web/gui/index.html diff --git a/web/infographic.html b/web/gui/infographic.html similarity index 100% rename from web/infographic.html rename to web/gui/infographic.html diff --git a/web/lib/bootstrap-3.3.7.min.js b/web/gui/lib/bootstrap-3.3.7.min.js similarity index 100% rename from web/lib/bootstrap-3.3.7.min.js rename to web/gui/lib/bootstrap-3.3.7.min.js diff --git a/web/lib/bootstrap-slider-10.0.0.min.js b/web/gui/lib/bootstrap-slider-10.0.0.min.js similarity index 100% rename from web/lib/bootstrap-slider-10.0.0.min.js rename to web/gui/lib/bootstrap-slider-10.0.0.min.js diff --git a/web/lib/bootstrap-table-1.11.0.min.js b/web/gui/lib/bootstrap-table-1.11.0.min.js similarity index 100% rename from web/lib/bootstrap-table-1.11.0.min.js rename to web/gui/lib/bootstrap-table-1.11.0.min.js diff --git a/web/lib/bootstrap-table-export-1.11.0.min.js b/web/gui/lib/bootstrap-table-export-1.11.0.min.js similarity index 100% rename from web/lib/bootstrap-table-export-1.11.0.min.js rename to web/gui/lib/bootstrap-table-export-1.11.0.min.js diff --git a/web/lib/bootstrap-toggle-2.2.2.min.js b/web/gui/lib/bootstrap-toggle-2.2.2.min.js similarity index 100% rename from web/lib/bootstrap-toggle-2.2.2.min.js rename to web/gui/lib/bootstrap-toggle-2.2.2.min.js diff --git a/web/lib/c3-0.4.18.min.js b/web/gui/lib/c3-0.4.18.min.js similarity index 100% rename from web/lib/c3-0.4.18.min.js rename to web/gui/lib/c3-0.4.18.min.js diff --git a/web/lib/clipboard-polyfill-be05dad.js b/web/gui/lib/clipboard-polyfill-be05dad.js similarity index 100% rename from web/lib/clipboard-polyfill-be05dad.js rename to web/gui/lib/clipboard-polyfill-be05dad.js diff --git 
a/web/lib/d3-4.12.2.min.js b/web/gui/lib/d3-4.12.2.min.js similarity index 100% rename from web/lib/d3-4.12.2.min.js rename to web/gui/lib/d3-4.12.2.min.js diff --git a/web/lib/d3pie-0.2.1-netdata-3.js b/web/gui/lib/d3pie-0.2.1-netdata-3.js similarity index 100% rename from web/lib/d3pie-0.2.1-netdata-3.js rename to web/gui/lib/d3pie-0.2.1-netdata-3.js diff --git a/web/lib/dygraph-c91c859.min.js b/web/gui/lib/dygraph-c91c859.min.js similarity index 100% rename from web/lib/dygraph-c91c859.min.js rename to web/gui/lib/dygraph-c91c859.min.js diff --git a/web/lib/dygraph-smooth-plotter-c91c859.js b/web/gui/lib/dygraph-smooth-plotter-c91c859.js similarity index 100% rename from web/lib/dygraph-smooth-plotter-c91c859.js rename to web/gui/lib/dygraph-smooth-plotter-c91c859.js diff --git a/web/lib/fontawesome-all-5.0.1.min.js b/web/gui/lib/fontawesome-all-5.0.1.min.js similarity index 100% rename from web/lib/fontawesome-all-5.0.1.min.js rename to web/gui/lib/fontawesome-all-5.0.1.min.js diff --git a/web/lib/gauge-1.3.2.min.js b/web/gui/lib/gauge-1.3.2.min.js similarity index 100% rename from web/lib/gauge-1.3.2.min.js rename to web/gui/lib/gauge-1.3.2.min.js diff --git a/web/lib/jquery-2.2.4.min.js b/web/gui/lib/jquery-2.2.4.min.js similarity index 100% rename from web/lib/jquery-2.2.4.min.js rename to web/gui/lib/jquery-2.2.4.min.js diff --git a/web/lib/jquery.easypiechart-97b5824.min.js b/web/gui/lib/jquery.easypiechart-97b5824.min.js similarity index 100% rename from web/lib/jquery.easypiechart-97b5824.min.js rename to web/gui/lib/jquery.easypiechart-97b5824.min.js diff --git a/web/lib/jquery.peity-3.2.0.min.js b/web/gui/lib/jquery.peity-3.2.0.min.js similarity index 100% rename from web/lib/jquery.peity-3.2.0.min.js rename to web/gui/lib/jquery.peity-3.2.0.min.js diff --git a/web/lib/jquery.sparkline-2.1.2.min.js b/web/gui/lib/jquery.sparkline-2.1.2.min.js similarity index 100% rename from web/lib/jquery.sparkline-2.1.2.min.js rename to 
web/gui/lib/jquery.sparkline-2.1.2.min.js diff --git a/web/lib/lz-string-1.4.4.min.js b/web/gui/lib/lz-string-1.4.4.min.js similarity index 100% rename from web/lib/lz-string-1.4.4.min.js rename to web/gui/lib/lz-string-1.4.4.min.js diff --git a/web/lib/morris-0.5.1.min.js b/web/gui/lib/morris-0.5.1.min.js similarity index 100% rename from web/lib/morris-0.5.1.min.js rename to web/gui/lib/morris-0.5.1.min.js diff --git a/web/lib/pako-1.0.6.min.js b/web/gui/lib/pako-1.0.6.min.js similarity index 100% rename from web/lib/pako-1.0.6.min.js rename to web/gui/lib/pako-1.0.6.min.js diff --git a/web/lib/perfect-scrollbar-0.6.15.min.js b/web/gui/lib/perfect-scrollbar-0.6.15.min.js similarity index 100% rename from web/lib/perfect-scrollbar-0.6.15.min.js rename to web/gui/lib/perfect-scrollbar-0.6.15.min.js diff --git a/web/lib/raphael-2.2.4-min.js b/web/gui/lib/raphael-2.2.4-min.js similarity index 100% rename from web/lib/raphael-2.2.4-min.js rename to web/gui/lib/raphael-2.2.4-min.js diff --git a/web/lib/tableExport-1.6.0.min.js b/web/gui/lib/tableExport-1.6.0.min.js similarity index 100% rename from web/lib/tableExport-1.6.0.min.js rename to web/gui/lib/tableExport-1.6.0.min.js diff --git a/web/netdata-swagger.json b/web/gui/netdata-swagger.json similarity index 100% rename from web/netdata-swagger.json rename to web/gui/netdata-swagger.json diff --git a/web/netdata-swagger.yaml b/web/gui/netdata-swagger.yaml similarity index 100% rename from web/netdata-swagger.yaml rename to web/gui/netdata-swagger.yaml diff --git a/web/refresh-badges.js b/web/gui/refresh-badges.js similarity index 100% rename from web/refresh-badges.js rename to web/gui/refresh-badges.js diff --git a/web/registry.html b/web/gui/registry.html similarity index 100% rename from web/registry.html rename to web/gui/registry.html diff --git a/web/robots.txt b/web/gui/robots.txt similarity index 100% rename from web/robots.txt rename to web/gui/robots.txt diff --git a/web/sitemap.xml b/web/gui/sitemap.xml 
similarity index 100% rename from web/sitemap.xml rename to web/gui/sitemap.xml diff --git a/web/tv.html b/web/gui/tv.html similarity index 100% rename from web/tv.html rename to web/gui/tv.html diff --git a/web/server/Makefile.am b/web/server/Makefile.am new file mode 100644 index 0000000000..843c4cc9bf --- /dev/null +++ b/web/server/Makefile.am @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + single \ + multi \ + static \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/server/README.md b/web/server/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/web/server/multi/Makefile.am b/web/server/multi/Makefile.am new file mode 100644 index 0000000000..90cc9ca1eb --- /dev/null +++ b/web/server/multi/Makefile.am @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/server/multi/README.md b/web/server/multi/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/web/server/multi/multi-threaded.c b/web/server/multi/multi-threaded.c new file mode 100644 index 0000000000..37bdd38ad2 --- /dev/null +++ b/web/server/multi/multi-threaded.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define WEB_SERVER_INTERNALS 1 +#include "multi-threaded.h" + +// -------------------------------------------------------------------------------------- +// the thread of a single client - for the MULTI-THREADED web server + +// 1. waits for input and output, using async I/O +// 2. it processes HTTP requests +// 3. it generates HTTP responses +// 4. 
it copies data from input to output if mode is FILECOPY + +int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS; +int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST; +long web_client_streaming_rate_t = 0L; + +static void multi_threaded_web_client_worker_main_cleanup(void *ptr) { + struct web_client *w = ptr; + WEB_CLIENT_IS_DEAD(w); + w->running = 0; +} + +static void *multi_threaded_web_client_worker_main(void *ptr) { + netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr); + + struct web_client *w = ptr; + w->running = 1; + + struct pollfd fds[2], *ifd, *ofd; + int retval, timeout_ms; + nfds_t fdmax = 0; + + while(!netdata_exit) { + if(unlikely(web_client_check_dead(w))) { + debug(D_WEB_CLIENT, "%llu: client is dead.", w->id); + break; + } + else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) { + debug(D_WEB_CLIENT, "%llu: client is not set for neither receiving nor sending data.", w->id); + break; + } + + if(unlikely(w->ifd < 0 || w->ofd < 0)) { + error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd", w->id, w->ifd, w->ofd); + break; + } + + if(w->ifd == w->ofd) { + fds[0].fd = w->ifd; + fds[0].events = 0; + fds[0].revents = 0; + + if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; + if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT; + + fds[1].fd = -1; + fds[1].events = 0; + fds[1].revents = 0; + + ifd = ofd = &fds[0]; + + fdmax = 1; + } + else { + fds[0].fd = w->ifd; + fds[0].events = 0; + fds[0].revents = 0; + if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN; + ifd = &fds[0]; + + fds[1].fd = w->ofd; + fds[1].events = 0; + fds[1].revents = 0; + if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT; + ofd = &fds[1]; + + fdmax = 2; + } + + debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); + errno = 
0; + timeout_ms = web_client_timeout * 1000; + retval = poll(fds, fdmax, timeout_ms); + + if(unlikely(netdata_exit)) break; + + if(unlikely(retval == -1)) { + if(errno == EAGAIN || errno == EINTR) { + debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id); + continue; + } + + debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = %d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd); + break; + } + else if(unlikely(!retval)) { + debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":""); + break; + } + + if(unlikely(netdata_exit)) break; + + int used = 0; + if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) { + used++; + if(web_client_send(w) < 0) { + debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id); + break; + } + } + + if(unlikely(netdata_exit)) break; + + if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) { + used++; + if(web_client_receive(w) < 0) { + debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. 
Closing client.", w->id); + break; + } + + if(w->mode == WEB_CLIENT_MODE_NORMAL) { + debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id); + web_client_process_request(w); + + // if the sockets are closed, may have transferred this client + // to plugins.d + if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM)) + break; + } + } + + if(unlikely(!used)) { + debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id); + break; + } + } + + if(w->mode != WEB_CLIENT_MODE_STREAM) + web_server_log_connection(w, "DISCONNECTED"); + + web_client_request_done(w); + + debug(D_WEB_CLIENT, "%llu: done...", w->id); + + // close the sockets/files now + // to free file descriptors + if(w->ifd == w->ofd) { + if(w->ifd != -1) close(w->ifd); + } + else { + if(w->ifd != -1) close(w->ifd); + if(w->ofd != -1) close(w->ofd); + } + w->ifd = -1; + w->ofd = -1; + + netdata_thread_cleanup_pop(1); + return NULL; +} + +// -------------------------------------------------------------------------------------- +// the main socket listener - MULTI-THREADED + +// 1. it accepts new incoming requests on our port +// 2. creates a new web_client for each connection received +// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients) +// 4. 
cleans up old web_clients that their netdata_threads have been exited + +static void web_client_multi_threaded_web_server_release_clients(void) { + struct web_client *w; + for(w = web_clients_cache.used; w ; ) { + if(unlikely(!w->running && web_client_check_dead(w))) { + struct web_client *t = w->next; + web_client_release(w); + w = t; + } + else + w = w->next; + } +} + +static void web_client_multi_threaded_web_server_stop_all_threads(void) { + struct web_client *w; + + int found = 1; + usec_t max = 2 * USEC_PER_SEC, step = 50000; + for(w = web_clients_cache.used; w ; w = w->next) { + if(w->running) { + found++; + info("stopping web client %s, id %llu", w->client_ip, w->id); + netdata_thread_cancel(w->thread); + } + } + + while(found && max > 0) { + max -= step; + info("Waiting %d web threads to finish...", found); + sleep_usec(step); + found = 0; + for(w = web_clients_cache.used; w ; w = w->next) + if(w->running) found++; + } + + if(found) + error("%d web threads are taking too long to finish. 
Giving up.", found); +} + +static struct pollfd *socket_listen_main_multi_threaded_fds = NULL; + +static void socket_listen_main_multi_threaded_cleanup(void *data) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("cleaning up..."); + + info("releasing allocated memory..."); + freez(socket_listen_main_multi_threaded_fds); + + info("closing all sockets..."); + listen_sockets_close(&api_sockets); + + info("stopping all running web server threads..."); + web_client_multi_threaded_web_server_stop_all_threads(); + + info("freeing web clients cache..."); + web_client_cache_destroy(); + + info("cleanup completed."); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +#define CLEANUP_EVERY_EVENTS 60 +void *socket_listen_main_multi_threaded(void *ptr) { + netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr); + + web_server_mode = WEB_SERVER_MODE_MULTI_THREADED; + web_server_is_multithreaded = 1; + + struct web_client *w; + int retval, counter = 0; + + if(!api_sockets.opened) + fatal("LISTENER: No sockets to listen to."); + + socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened); + + size_t i; + for(i = 0; i < api_sockets.opened ;i++) { + socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i]; + socket_listen_main_multi_threaded_fds[i].events = POLLIN; + socket_listen_main_multi_threaded_fds[i].revents = 0; + + info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); + } + + int timeout_ms = 1 * 1000; + + while(!netdata_exit) { + + // debug(D_WEB_CLIENT, "LISTENER: Waiting..."); + retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms); + + if(unlikely(retval == -1)) { + error("LISTENER: poll() failed."); + continue; + } + else if(unlikely(!retval)) { + debug(D_WEB_CLIENT, "LISTENER: poll() timeout."); + counter++; + continue; + } + + 
for(i = 0 ; i < api_sockets.opened ; i++) { + short int revents = socket_listen_main_multi_threaded_fds[i].revents; + + // check for new incoming connections + if(revents & POLLIN || revents & POLLPRI) { + socket_listen_main_multi_threaded_fds[i].revents = 0; + + w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd); + if(unlikely(!w)) { + // no need for error log - web_client_create_on_listenfd already logged the error + continue; + } + + if(api_sockets.fds_families[i] == AF_UNIX) + web_client_set_unix(w); + else + web_client_set_tcp(w); + + char tag[NETDATA_THREAD_TAG_MAX + 1]; + snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port); + + w->running = 1; + if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) { + w->running = 0; + web_client_release(w); + } + } + } + + counter++; + if(counter > CLEANUP_EVERY_EVENTS) { + counter = 0; + web_client_multi_threaded_web_server_release_clients(); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + + diff --git a/web/server/multi/multi-threaded.h b/web/server/multi/multi-threaded.h new file mode 100644 index 0000000000..d7ebf3c54d --- /dev/null +++ b/web/server/multi/multi-threaded.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_SERVER_MULTI_THREADED_H +#define NETDATA_WEB_SERVER_MULTI_THREADED_H + +#include "web/server/web_server.h" + +extern void *socket_listen_main_multi_threaded(void *ptr); + +#endif //NETDATA_WEB_SERVER_MULTI_THREADED_H diff --git a/web/server/single/Makefile.am b/web/server/single/Makefile.am new file mode 100644 index 0000000000..90cc9ca1eb --- /dev/null +++ b/web/server/single/Makefile.am @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff 
--git a/web/server/single/README.md b/web/server/single/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/web/server/single/single-threaded.c b/web/server/single/single-threaded.c new file mode 100644 index 0000000000..7e89ee683b --- /dev/null +++ b/web/server/single/single-threaded.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define WEB_SERVER_INTERNALS 1 +#include "single-threaded.h" + +// -------------------------------------------------------------------------------------- +// the main socket listener - SINGLE-THREADED + +struct web_client *single_threaded_clients[FD_SETSIZE]; + +static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) { + if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { + return 1; + } + + if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) { + error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d)", w->id, w->ifd, w->ofd, (int)FD_SETSIZE); + return 1; + } + + FD_SET(w->ifd, efds); + if(unlikely(*max < w->ifd)) *max = w->ifd; + + if(unlikely(w->ifd != w->ofd)) { + if(*max < w->ofd) *max = w->ofd; + FD_SET(w->ofd, efds); + } + + if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds); + if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds); + + single_threaded_clients[w->ifd] = w; + single_threaded_clients[w->ofd] = w; + + return 0; +} + +static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) { + FD_CLR(w->ifd, efds); + if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds); + + if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds); + if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds); + + single_threaded_clients[w->ifd] = NULL; + single_threaded_clients[w->ofd] = NULL; + + if(unlikely(web_client_check_dead(w) || 
(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) { + return 1; + } + + return 0; +} + +static void socket_listen_main_single_threaded_cleanup(void *data) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + info("closing all sockets..."); + listen_sockets_close(&api_sockets); + + info("freeing web clients cache..."); + web_client_cache_destroy(); + + info("cleanup completed."); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *socket_listen_main_single_threaded(void *ptr) { + netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr); + web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED; + web_server_is_multithreaded = 0; + + struct web_client *w; + + if(!api_sockets.opened) + fatal("LISTENER: no listen sockets available."); + + size_t i; + for(i = 0; i < (size_t)FD_SETSIZE ; i++) + single_threaded_clients[i] = NULL; + + fd_set ifds, ofds, efds, rifds, rofds, refds; + FD_ZERO (&ifds); + FD_ZERO (&ofds); + FD_ZERO (&efds); + int fdmax = 0; + + for(i = 0; i < api_sockets.opened ; i++) { + if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE) + fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]); + + info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN"); + + FD_SET(api_sockets.fds[i], &ifds); + FD_SET(api_sockets.fds[i], &efds); + if(fdmax < api_sockets.fds[i]) + fdmax = api_sockets.fds[i]; + } + + while(!netdata_exit) { + debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", fdmax); + + struct timeval tv = { .tv_sec = 1, .tv_usec = 0 }; + rifds = ifds; + rofds = ofds; + refds = efds; + int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv); + + if(unlikely(retval == -1)) { + error("LISTENER: select() failed."); + continue; + } + else if(likely(retval)) { + debug(D_WEB_CLIENT_ACCESS, "LISTENER: got 
something."); + + for(i = 0; i < api_sockets.opened ; i++) { + if (FD_ISSET(api_sockets.fds[i], &rifds)) { + debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection."); + w = web_client_create_on_listenfd(api_sockets.fds[i]); + if(unlikely(!w)) + continue; + + if(api_sockets.fds_families[i] == AF_UNIX) + web_client_set_unix(w); + else + web_client_set_tcp(w); + + if (single_threaded_link_client(w, &ifds, &ofds, &ifds, &fdmax) != 0) { + web_client_release(w); + } + } + } + + for(i = 0 ; i <= (size_t)fdmax ; i++) { + if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds))) + continue; + + w = single_threaded_clients[i]; + if(unlikely(!w)) { + // error("no client on slot %zu", i); + continue; + } + + if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) { + // error("failed to unlink client %zu", i); + web_client_release(w); + continue; + } + + if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) { + // error("no input on client %zu", i); + web_client_release(w); + continue; + } + + if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) { + if (unlikely(web_client_receive(w) < 0)) { + // error("cannot read from client %zu", i); + web_client_release(w); + continue; + } + + if (w->mode != WEB_CLIENT_MODE_FILECOPY) { + debug(D_WEB_CLIENT, "%llu: Processing received data.", w->id); + web_client_process_request(w); + } + } + + if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) { + if (unlikely(web_client_send(w) < 0)) { + // error("cannot send data to client %zu", i); + debug(D_WEB_CLIENT, "%llu: Cannot send data to client. 
Closing client.", w->id); + web_client_release(w); + continue; + } + } + + if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) { + // error("failed to link client %zu", i); + web_client_release(w); + } + } + } + else { + debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout."); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + + diff --git a/web/server/single/single-threaded.h b/web/server/single/single-threaded.h new file mode 100644 index 0000000000..fab4ceba1d --- /dev/null +++ b/web/server/single/single-threaded.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_SERVER_SINGLE_THREADED_H +#define NETDATA_WEB_SERVER_SINGLE_THREADED_H + +#include "web/server/web_server.h" + +extern void *socket_listen_main_single_threaded(void *ptr); + +#endif //NETDATA_WEB_SERVER_SINGLE_THREADED_H diff --git a/web/server/static/Makefile.am b/web/server/static/Makefile.am new file mode 100644 index 0000000000..90cc9ca1eb --- /dev/null +++ b/web/server/static/Makefile.am @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + $(NULL) + +dist_noinst_DATA = \ + README.md \ + $(NULL) diff --git a/web/server/static/README.md b/web/server/static/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c new file mode 100644 index 0000000000..a037390b8a --- /dev/null +++ b/web/server/static/static-threaded.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define WEB_SERVER_INTERNALS 1 +#include "static-threaded.h" + +// ---------------------------------------------------------------------------- +// high level web clients connection management + +static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) { + struct web_client *w; + + w = 
web_client_get_from_cache_or_allocate(); + w->ifd = w->ofd = fd; + + strncpyz(w->client_ip, client_ip, sizeof(w->client_ip) - 1); + strncpyz(w->client_port, client_port, sizeof(w->client_port) - 1); + + if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); + if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); + + web_client_initialize_connection(w); + return(w); +} + +// -------------------------------------------------------------------------------------- +// the main socket listener - STATIC-THREADED + +struct web_server_static_threaded_worker { + netdata_thread_t thread; + + int id; + int running; + + size_t max_sockets; + + volatile size_t connected; + volatile size_t disconnected; + volatile size_t receptions; + volatile size_t sends; + volatile size_t max_concurrent; + + volatile size_t files_read; + volatile size_t file_reads; +}; + +static long long static_threaded_workers_count = 1; +static struct web_server_static_threaded_worker *static_workers_private_data = NULL; +static __thread struct web_server_static_threaded_worker *worker_private = NULL; + +// ---------------------------------------------------------------------------- + +static inline int web_server_check_client_status(struct web_client *w) { + if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) + return -1; + + return 0; +} + +// ---------------------------------------------------------------------------- +// web server files + +static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) { + struct web_client *w = (struct web_client *)data; + + worker_private->files_read++; + + debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd); + *events = POLLIN; + pi->data = w; + return w; +} + +static void web_werver_file_del_callback(POLLINFO *pi) { + struct web_client *w = (struct web_client *)pi->data; + debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd); + + 
w->pollinfo_filecopy_slot = 0; + + if(unlikely(!w->pollinfo_slot)) { + debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd); + web_client_release(w); + } +} + +static int web_server_file_read_callback(POLLINFO *pi, short int *events) { + struct web_client *w = (struct web_client *)pi->data; + + // if there is no POLLINFO linked to this, it means the client disconnected + // stop the file reading too + if(unlikely(!w->pollinfo_slot)) { + debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd); + return -1; + } + + if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) { + debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd); + return -1; + } + + debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd); + + worker_private->file_reads++; + ssize_t ret = unlikely(web_client_read_file(w)); + + if(likely(web_client_has_wait_send(w))) { + POLLJOB *p = pi->p; // our POLLJOB + POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket + + debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd); + p->fds[wpi->slot].events |= POLLOUT; + } + + if(unlikely(ret <= 0 || w->ifd == w->ofd)) { + debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd); + return -1; + } + + *events = POLLIN; + return 0; +} + +static int web_server_file_write_callback(POLLINFO *pi, short int *events) { + (void)pi; + (void)events; + + error("Writing to web files is not supported!"); + + return -1; +} + +// ---------------------------------------------------------------------------- +// web server clients + +static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) { + (void)data; + + worker_private->connected++; + + size_t concurrent = worker_private->connected - worker_private->disconnected; + if(unlikely(concurrent > 
worker_private->max_concurrent)) + worker_private->max_concurrent = concurrent; + + *events = POLLIN; + + debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd); + struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port); + w->pollinfo_slot = pi->slot; + + if(unlikely(pi->socktype == AF_UNIX)) + web_client_set_unix(w); + else + web_client_set_tcp(w); + + debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd); + return w; +} + +// TCP client disconnected +static void web_server_del_callback(POLLINFO *pi) { + worker_private->disconnected++; + + struct web_client *w = (struct web_client *)pi->data; + + w->pollinfo_slot = 0; + if(unlikely(w->pollinfo_filecopy_slot)) { + POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket + debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FRED BY READING FILE JOB ON FD %d", w->id, fpi->fd); + } + else { + if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET)) + pi->flags |= POLLINFO_FLAG_DONT_CLOSE; + + debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd); + web_client_release(w); + } +} + +static int web_server_rcv_callback(POLLINFO *pi, short int *events) { + worker_private->receptions++; + + struct web_client *w = (struct web_client *)pi->data; + int fd = pi->fd; + + if(unlikely(web_client_receive(w) < 0)) + return -1; + + debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd); + web_client_process_request(w); + + if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) { + if(w->pollinfo_filecopy_slot == 0) { + debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd); + + if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) { + // add a new socket to poll_events, with the same + debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd); + + POLLINFO *fpi = poll_add_fd( + pi->p + , w->ifd + , 0 + , POLLINFO_FLAG_CLIENT_SOCKET + , "FILENAME" + , "" + , 
web_server_file_add_callback + , web_werver_file_del_callback + , web_server_file_read_callback + , web_server_file_write_callback + , (void *) w + ); + + if(fpi) + w->pollinfo_filecopy_slot = fpi->slot; + else { + error("Failed to add filecopy fd. Closing client."); + return -1; + } + } + } + } + else { + if(unlikely(w->ifd == fd && web_client_has_wait_receive(w))) + *events |= POLLIN; + } + + if(unlikely(w->ofd == fd && web_client_has_wait_send(w))) + *events |= POLLOUT; + + return web_server_check_client_status(w); +} + +static int web_server_snd_callback(POLLINFO *pi, short int *events) { + worker_private->sends++; + + struct web_client *w = (struct web_client *)pi->data; + int fd = pi->fd; + + debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd); + + if(unlikely(web_client_send(w) < 0)) + return -1; + + if(unlikely(w->ifd == fd && web_client_has_wait_receive(w))) + *events |= POLLIN; + + if(unlikely(w->ofd == fd && web_client_has_wait_send(w))) + *events |= POLLOUT; + + return web_server_check_client_status(w); +} + +static void web_server_tmr_callback(void *timer_data) { + worker_private = (struct web_server_static_threaded_worker *)timer_data; + + static __thread RRDSET *st = NULL; + static __thread RRDDIM *rd_user = NULL, *rd_system = NULL; + + if(unlikely(!st)) { + char id[100 + 1]; + char title[100 + 1]; + + snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1); + snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1); + + st = rrdset_create_localhost( + "netdata" + , id + , NULL + , "web" + , "netdata.web_cpu" + , title + , "milliseconds/s" + , "web" + , "stats" + , 132000 + worker_private->id + , default_rrd_update_every + , RRDSET_TYPE_STACKED + ); + + rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + } + else + rrdset_next(st); + + struct rusage rusage; + getrusage(RUSAGE_THREAD, &rusage); 
+ rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec); + rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec); + rrdset_done(st); +} + +// ---------------------------------------------------------------------------- +// web server worker thread + +static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) { + worker_private = (struct web_server_static_threaded_worker *)ptr; + + info("freeing local web clients cache..."); + web_client_cache_destroy(); + + info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends", + worker_private->connected, + worker_private->disconnected, + worker_private->max_concurrent, + worker_private->receptions, + worker_private->sends + ); + + worker_private->running = 0; +} + +void *socket_listen_main_static_threaded_worker(void *ptr) { + worker_private = (struct web_server_static_threaded_worker *)ptr; + worker_private->running = 1; + + netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr); + + poll_events(&api_sockets + , web_server_add_callback + , web_server_del_callback + , web_server_rcv_callback + , web_server_snd_callback + , web_server_tmr_callback + , web_allow_connections_from + , NULL + , web_client_first_request_timeout + , web_client_timeout + , default_rrd_update_every * 1000 // timer_milliseconds + , ptr // timer_data + , worker_private->max_sockets + ); + + netdata_thread_cleanup_pop(1); + return NULL; +} + + +// ---------------------------------------------------------------------------- +// web server main thread - also becomes a worker + +static void socket_listen_main_static_threaded_cleanup(void *ptr) { + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + int i, found = 0; + usec_t max = 2 * USEC_PER_SEC, step = 50000; + + // we start from 1, - 0 is 
self + for(i = 1; i < static_threaded_workers_count; i++) { + if(static_workers_private_data[i].running) { + found++; + info("stopping worker %d", i + 1); + netdata_thread_cancel(static_workers_private_data[i].thread); + } + else + info("found stopped worker %d", i + 1); + } + + while(found && max > 0) { + max -= step; + info("Waiting %d static web threads to finish...", found); + sleep_usec(step); + found = 0; + + // we start from 1, - 0 is self + for(i = 1; i < static_threaded_workers_count; i++) { + if (static_workers_private_data[i].running) + found++; + } + } + + if(found) + error("%d static web threads are taking too long to finish. Giving up.", found); + + info("closing all web server sockets..."); + listen_sockets_close(&api_sockets); + + info("all static web threads stopped."); + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +void *socket_listen_main_static_threaded(void *ptr) { + netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr); + web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; + + if(!api_sockets.opened) + fatal("LISTENER: no listen sockets available."); + + // 6 threads is the optimal value + // since 6 are the parallel connections browsers will do + // so, if the machine has more CPUs, avoid using resources unnecessarily + int def_thread_count = (processors > 6)?6:processors; + + static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count); + if(static_threaded_workers_count < 1) static_threaded_workers_count = 1; + + size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 2)); + + static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker)); + + web_server_is_multithreaded = (static_threaded_workers_count > 1); + + int i; + for(i = 1; i < static_threaded_workers_count; i++) { + static_workers_private_data[i].id = i; + 
static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count; + + char tag[50 + 1]; + snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1); + + info("starting worker %d", i+1); + netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]); + } + + // and the main one + static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count; + socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]); + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/web/server/static/static-threaded.h b/web/server/static/static-threaded.h new file mode 100644 index 0000000000..5f4862e5b1 --- /dev/null +++ b/web/server/static/static-threaded.h @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_SERVER_STATIC_THREADED_H +#define NETDATA_WEB_SERVER_STATIC_THREADED_H + +#include "web/server/web_server.h" + +extern void *socket_listen_main_static_threaded(void *ptr); + +#endif //NETDATA_WEB_SERVER_STATIC_THREADED_H diff --git a/src/webserver/web_client.c b/web/server/web_client.c similarity index 100% rename from src/webserver/web_client.c rename to web/server/web_client.c diff --git a/src/webserver/web_client.h b/web/server/web_client.h similarity index 99% rename from src/webserver/web_client.h rename to web/server/web_client.h index 14293cac11..5cf188d520 100644 --- a/src/webserver/web_client.h +++ b/web/server/web_client.h @@ -3,7 +3,7 @@ #ifndef NETDATA_WEB_CLIENT_H #define NETDATA_WEB_CLIENT_H 1 -#include "../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" #ifdef NETDATA_WITH_ZLIB extern int web_enable_gzip, @@ -191,6 +191,6 @@ extern void buffer_data_options2string(BUFFER *wb, uint32_t options); extern int mysendfile(struct web_client *w, char *filename); -#include "../common.h" +#include "daemon/common.h" #endif diff --git 
a/web/server/web_client_cache.c b/web/server/web_client_cache.c new file mode 100644 index 0000000000..ab470560ed --- /dev/null +++ b/web/server/web_client_cache.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define WEB_SERVER_INTERNALS 1 +#include "web_client_cache.h" + +// ---------------------------------------------------------------------------- +// allocate and free web_clients + +static void web_client_zero(struct web_client *w) { + // zero everything about it - but keep the buffers + + // remember the pointers to the buffers + BUFFER *b1 = w->response.data; + BUFFER *b2 = w->response.header; + BUFFER *b3 = w->response.header_output; + + // empty the buffers + buffer_flush(b1); + buffer_flush(b2); + buffer_flush(b3); + + freez(w->user_agent); + + // zero everything + memset(w, 0, sizeof(struct web_client)); + + // restore the pointers of the buffers + w->response.data = b1; + w->response.header = b2; + w->response.header_output = b3; +} + +static void web_client_free(struct web_client *w) { + buffer_free(w->response.header_output); + buffer_free(w->response.header); + buffer_free(w->response.data); + freez(w->user_agent); + freez(w); +} + +static struct web_client *web_client_alloc(void) { + struct web_client *w = callocz(1, sizeof(struct web_client)); + w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); + w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE); + w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE); + return w; +} + +// ---------------------------------------------------------------------------- +// web clients caching + +// When clients connect and disconnect, avoid allocating and releasing memory. +// Instead, when new clients get connected, reuse any memory previously allocated +// for serving web clients that are now disconnected. + +// The size of the cache is adaptive. It caches the structures of 2x +// the number of currently connected clients. 
+ +// Comments per server: +// SINGLE-THREADED : 1 cache is maintained +// MULTI-THREADED : 1 cache is maintained +// STATIC-THREADED : 1 cache for each thread of the web server +
+__thread struct clients_cache web_clients_cache = { + .pid = 0, + .used = NULL, + .used_count = 0, + .avail = NULL, + .avail_count = 0, + .allocated = 0, + .reused = 0 +}; + +inline void web_client_cache_verify(int force) { +#ifdef NETDATA_INTERNAL_CHECKS + static __thread size_t count = 0; + count++; + + if(unlikely(force || count > 1000)) { + count = 0; + + struct web_client *w; + size_t used = 0, avail = 0; + for(w = web_clients_cache.used; w ; w = w->next) used++; + for(w = web_clients_cache.avail; w ; w = w->next) avail++; + + info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)." + , used, web_clients_cache.used_count + , avail, web_clients_cache.avail_count + , web_clients_cache.allocated + , web_clients_cache.reused + , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0 + ); + } +#else + if(unlikely(force)) { + info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)." + , web_clients_cache.used_count + , web_clients_cache.avail_count + , web_clients_cache.allocated + , web_clients_cache.reused + , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0 + ); + } +#endif +} + +// destroy the cache and free all the memory it uses +void web_client_cache_destroy(void) { +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) + error("Oops! wrong thread accessing the cache. 
Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); + + web_client_cache_verify(1); +#endif + + netdata_thread_disable_cancelability(); + + struct web_client *w, *t; + + w = web_clients_cache.used; + while(w) { + t = w; + w = w->next; + web_client_free(t); + } + web_clients_cache.used = NULL; + web_clients_cache.used_count = 0; + + w = web_clients_cache.avail; + while(w) { + t = w; + w = w->next; + web_client_free(t); + } + web_clients_cache.avail = NULL; + web_clients_cache.avail_count = 0; + + netdata_thread_enable_cancelability(); +} + +struct web_client *web_client_get_from_cache_or_allocate() { + +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(web_clients_cache.pid == 0)) + web_clients_cache.pid = gettid(); + + if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) + error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); +#endif + + netdata_thread_disable_cancelability(); + + struct web_client *w = web_clients_cache.avail; + + if(w) { + // get it from avail + if (w == web_clients_cache.avail) web_clients_cache.avail = w->next; + if(w->prev) w->prev->next = w->next; + if(w->next) w->next->prev = w->prev; + web_clients_cache.avail_count--; + web_client_zero(w); + web_clients_cache.reused++; + } + else { + // allocate it + w = web_client_alloc(); + web_clients_cache.allocated++; + } + + // link it to used web clients + if (web_clients_cache.used) web_clients_cache.used->prev = w; + w->next = web_clients_cache.used; + w->prev = NULL; + web_clients_cache.used = w; + web_clients_cache.used_count++; + + // initialize it + w->id = web_client_connected(); + w->mode = WEB_CLIENT_MODE_NORMAL; + + netdata_thread_enable_cancelability(); + + return w; +} + +void web_client_release(struct web_client *w) { +#ifdef NETDATA_INTERNAL_CHECKS + if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid())) + error("Oops! wrong thread accessing the cache. 
Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid()); + + if(unlikely(w->running)) + error("%llu: releasing web client from %s port %s, but it still running.", w->id, w->client_ip, w->client_port); +#endif + + debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port); + + web_server_log_connection(w, "DISCONNECTED"); + web_client_request_done(w); + web_client_disconnected(); + + netdata_thread_disable_cancelability(); + + if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) { + if (w->ifd != -1) close(w->ifd); + if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd); + w->ifd = w->ofd = -1; + } + + // unlink it from the used + if (w == web_clients_cache.used) web_clients_cache.used = w->next; + if(w->prev) w->prev->next = w->next; + if(w->next) w->next->prev = w->prev; + web_clients_cache.used_count--; + + if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) { + // we have too many of them - free it + web_client_free(w); + } + else { + // link it to the avail + if (web_clients_cache.avail) web_clients_cache.avail->prev = w; + w->next = web_clients_cache.avail; + w->prev = NULL; + web_clients_cache.avail = w; + web_clients_cache.avail_count++; + } + + netdata_thread_enable_cancelability(); +} + diff --git a/web/server/web_client_cache.h b/web/server/web_client_cache.h new file mode 100644 index 0000000000..2cbba2c8b9 --- /dev/null +++ b/web/server/web_client_cache.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WEB_CLIENT_CACHE_H +#define NETDATA_WEB_CLIENT_CACHE_H + +#include "web_server.h" + +struct clients_cache { + pid_t pid; + + struct web_client *used; // the structures of the currently connected clients + size_t used_count; // the count the currently connected clients + + struct web_client *avail; // the cached structures, available for future clients + size_t avail_count; // the number of cached structures + + size_t reused; // the number of 
re-uses + size_t allocated; // the number of allocations +}; + +extern __thread struct clients_cache web_clients_cache; + +extern void web_client_release(struct web_client *w); + +extern struct web_client *web_client_get_from_cache_or_allocate(); +extern void web_client_cache_destroy(void); +extern void web_client_cache_verify(int force); + +#endif //NETDATA_WEB_CLIENT_CACHE_H diff --git a/web/server/web_server.c b/web/server/web_server.c new file mode 100644 index 0000000000..a32c6e8f51 --- /dev/null +++ b/web/server/web_server.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#define WEB_SERVER_INTERNALS 1 +#include "web_server.h" + +// this file includes 3 web servers: +// +// 1. single-threaded, based on select() +// 2. multi-threaded, based on poll() that spawns threads to handle the requests, based on select() +// 3. static-threaded, based on poll() using a fixed number of threads (configured at netdata.conf) + +WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED; + +// -------------------------------------------------------------------------------------- + +WEB_SERVER_MODE web_server_mode_id(const char *mode) { + if(!strcmp(mode, "none")) + return WEB_SERVER_MODE_NONE; + else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded")) + return WEB_SERVER_MODE_SINGLE_THREADED; + else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded")) + return WEB_SERVER_MODE_STATIC_THREADED; + else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded")) + return WEB_SERVER_MODE_MULTI_THREADED; +} + +const char *web_server_mode_name(WEB_SERVER_MODE id) { + switch(id) { + case WEB_SERVER_MODE_NONE: + return "none"; + + case WEB_SERVER_MODE_SINGLE_THREADED: + return "single-threaded"; + + case WEB_SERVER_MODE_STATIC_THREADED: + return "static-threaded"; + + default: + case WEB_SERVER_MODE_MULTI_THREADED: + return "multi-threaded"; + } +} + +// 
-------------------------------------------------------------------------------------- +// API sockets + +LISTEN_SOCKETS api_sockets = { + .config_section = CONFIG_SECTION_WEB, + .default_bind_to = "*", + .default_port = API_LISTEN_PORT, + .backlog = API_LISTEN_BACKLOG +}; + +int api_listen_sockets_setup(void) { + int socks = listen_sockets_setup(&api_sockets); + + if(!socks) + fatal("LISTENER: Cannot listen on any API socket. Exiting..."); + + return socks; +} + + +// -------------------------------------------------------------------------------------- +// access lists + +SIMPLE_PATTERN *web_allow_connections_from = NULL; +SIMPLE_PATTERN *web_allow_streaming_from = NULL; +SIMPLE_PATTERN *web_allow_netdataconf_from = NULL; + +// WEB_CLIENT_ACL +SIMPLE_PATTERN *web_allow_dashboard_from = NULL; +SIMPLE_PATTERN *web_allow_registry_from = NULL; +SIMPLE_PATTERN *web_allow_badges_from = NULL; + +void web_client_update_acl_matches(struct web_client *w) { + w->acl = WEB_CLIENT_ACL_NONE; + + if(!web_allow_dashboard_from || simple_pattern_matches(web_allow_dashboard_from, w->client_ip)) + w->acl |= WEB_CLIENT_ACL_DASHBOARD; + + if(!web_allow_registry_from || simple_pattern_matches(web_allow_registry_from, w->client_ip)) + w->acl |= WEB_CLIENT_ACL_REGISTRY; + + if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip)) + w->acl |= WEB_CLIENT_ACL_BADGE; +} + + +// -------------------------------------------------------------------------------------- + +void web_server_log_connection(struct web_client *w, const char *msg) { + log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg); +} + +// -------------------------------------------------------------------------------------- + +void web_client_initialize_connection(struct web_client *w) { + int flag = 1; + + if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0)) + debug(D_WEB_CLIENT, "%llu: failed 
to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd); + + flag = 1; + if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0)) + debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd); + + web_client_update_acl_matches(w); + + w->origin[0] = '*'; w->origin[1] = '\0'; + w->cookie1[0] = '\0'; w->cookie2[0] = '\0'; + freez(w->user_agent); w->user_agent = NULL; + + web_client_enable_wait_receive(w); + + web_server_log_connection(w, "CONNECTED"); + + web_client_cache_verify(0); +} + +struct web_client *web_client_create_on_listenfd(int listener) { + struct web_client *w; + + w = web_client_get_from_cache_or_allocate(); + w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from); + + if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-"); + if(unlikely(!*w->client_port)) strcpy(w->client_port, "-"); + + if (w->ifd == -1) { + if(errno == EPERM) + web_server_log_connection(w, "ACCESS DENIED"); + else { + web_server_log_connection(w, "CONNECTION FAILED"); + error("%llu: Failed to accept new incoming connection.", w->id); + } + + web_client_release(w); + return NULL; + } + + web_client_initialize_connection(w); + return(w); +} + diff --git a/src/webserver/web_server.h b/web/server/web_server.h similarity index 69% rename from src/webserver/web_server.h rename to web/server/web_server.h index a375f1cd42..05ac9c4616 100644 --- a/src/webserver/web_server.h +++ b/web/server/web_server.h @@ -3,7 +3,7 @@ #ifndef NETDATA_WEB_SERVER_H #define NETDATA_WEB_SERVER_H 1 -#include "../common.h" +#include "daemon/common.h" #include "web_client.h" #ifndef API_LISTEN_PORT @@ -33,9 +33,6 @@ extern WEB_SERVER_MODE web_server_mode; extern WEB_SERVER_MODE web_server_mode_id(const char *mode); extern const char *web_server_mode_name(WEB_SERVER_MODE id); -extern void *socket_listen_main_multi_threaded(void *ptr); -extern 
void *socket_listen_main_single_threaded(void *ptr); -extern void *socket_listen_main_static_threaded(void *ptr); extern int api_listen_sockets_setup(void); #define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60 @@ -44,4 +41,18 @@ extern int web_client_timeout; extern int web_client_first_request_timeout; extern long web_client_streaming_rate_t; +#ifdef WEB_SERVER_INTERNALS +extern LISTEN_SOCKETS api_sockets; +extern void web_client_update_acl_matches(struct web_client *w); +extern void web_server_log_connection(struct web_client *w, const char *msg); +extern void web_client_initialize_connection(struct web_client *w); +extern struct web_client *web_client_create_on_listenfd(int listener); + +#include "web_client_cache.h" +#endif // WEB_SERVER_INTERNALS + +#include "single/single-threaded.h" +#include "multi/multi-threaded.h" +#include "static/static-threaded.h" + #endif /* NETDATA_WEB_SERVER_H */