Mirror of https://github.com/netdata/netdata.git (synced 2025-04-13 17:19:11 +00:00)

parent 97a13e0361
commit acca8c24f2

71 changed files with 128 additions and 128 deletions
.github/scripts
aclk
collectors
  apps.plugin
  cgroups.plugin
  charts.d.plugin
    ap
    apcupsd
    example
    libreswan
    nut
    opensips
    sensors
  ebpf.plugin
    README.md
    ebpf.c
    ebpf_apps.c
    ebpf_cachestat.c
    ebpf_dcstat.c
    ebpf_fd.c
    ebpf_mount.c
    ebpf_oomkill.c
    ebpf_process.c
    ebpf_shm.c
    ebpf_socket.c
    ebpf_swap.c
    ebpf_vfs.c
  node.d.plugin/named
  proc.plugin
  python.d.plugin
    anomalies
    changefinder
    go_expvar
    mongodb
    postgres
    zscores
  statsd.plugin
daemon
database
docs
exporting
health
libnetdata
packaging
tests/profile
web
.github/scripts/check-updater.sh (vendored, 6 changes)
@@ -4,20 +4,20 @@ set -e
# shellcheck source=.github/scripts/functions.sh
. "$(dirname "$0")/functions.sh"

-check_successfull_update() {
+check_successful_update() {
progress "Check netdata version after update"
(
netdata_version=$(netdata -v | awk '{print $2}')
updater_version=$(cat packaging/version)
if [ "$netdata_version" = "$updater_version" ]; then
-echo "Update successfull!"
+echo "Update successful!"
else
exit 1
fi
) >&2
}

-steps="check_successfull_update"
+steps="check_successful_update"

_main() {
for step in $steps; do
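Taken together, the hunk above renames the update check and the `steps` entry that runs it. For reference, the post-change flow reads roughly as sketched below; this is a simplified, self-contained rendering, with a stand-in `progress` helper (the real script sources it from functions.sh) and an assumed loop body, since the hunk ends at the `for` line.

```sh
#!/bin/sh
# Sketch of the check-updater.sh flow after the rename.
# "progress" comes from functions.sh in the real script; a minimal
# stand-in is defined here so the sketch runs on its own.
set -e

progress() { echo "--- $* ---"; }

check_successful_update() {
  progress "Check netdata version after update"
  (
    netdata_version=$(netdata -v | awk '{print $2}')
    updater_version=$(cat packaging/version)
    if [ "$netdata_version" = "$updater_version" ]; then
      echo "Update successful!"
    else
      exit 1
    fi
  ) >&2
}

steps="check_successful_update"

_main() {
  # run every configured step; set -e aborts on the first failure
  for step in $steps; do
    "$step"
  done
}

_main "$@"
```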
@@ -505,7 +505,7 @@ static unsigned long aclk_reconnect_delay() {
return aclk_tbeb_delay(0, aclk_env->backoff.base, aclk_env->backoff.min_s, aclk_env->backoff.max_s);
}

-/* Block till aclk_reconnect_delay is satisifed or netdata_exit is signalled
+/* Block till aclk_reconnect_delay is satisfied or netdata_exit is signalled
* @return 0 - Go ahead and connect (delay expired)
* 1 - netdata_exit
*/

@@ -25,7 +25,7 @@ extern struct aclk_shared_state {
time_t last_popcorn_interrupt;

// To wait for `disconnect` message PUBACK
-// when shuting down
+// when shutting down
// at the same time if > 0 we know link is
// shutting down
int mqtt_shutdown_msg_id;

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-// This is copied from Legacy ACLK, Original Autor: amoss
+// This is copied from Legacy ACLK, Original Author: amoss

// TODO unmess this


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-// This is copied from Legacy ACLK, Original Autor: amoss
+// This is copied from Legacy ACLK, Original Author: amoss

// TODO unmess this


@@ -209,7 +209,7 @@ static int parse_passwd_response(const char *json_str, struct auth_data *auth) {

json = json_tokener_parse(json_str);
if (!json) {
-error("JSON-C failed to parse the payload of http respons of /env endpoint");
+error("JSON-C failed to parse the payload of http response of /env endpoint");
return 1;
}

@@ -359,7 +359,7 @@ static int aclk_parse_otp_error(const char *json_str) {

json = json_tokener_parse(json_str);
if (!json) {
-error("JSON-C failed to parse the payload of http respons of /env endpoint");
+error("JSON-C failed to parse the payload of http response of /env endpoint");
return 1;
}

@@ -730,7 +730,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) {

json = json_tokener_parse(json_str);
if (!json) {
-error("JSON-C failed to parse the payload of http respons of /env endpoint");
+error("JSON-C failed to parse the payload of http response of /env endpoint");
return 1;
}

@@ -20,7 +20,7 @@ static void aclk_send_message_subtopic(mqtt_wss_client client, json_object *msg,
const char *topic = aclk_get_topic(subtopic);

if (unlikely(!topic)) {
-error("Couldn't get topic. Aborting mesage send");
+error("Couldn't get topic. Aborting message send");
return;
}

@@ -74,7 +74,7 @@ static uint16_t aclk_send_message_subtopic_pid(mqtt_wss_client client, json_obje
const char *topic = aclk_get_topic(subtopic);

if (unlikely(!topic)) {
-error("Couldn't get topic. Aborting mesage send");
+error("Couldn't get topic. Aborting message send");
return 0;
}

@@ -606,7 +606,7 @@ static int parse_host_port(url_t *url) {
error(URL_PARSER_LOG_PREFIX ": specified but no port number");
return 1;
}
-if (port_len > 5 /* MAX port lenght is 5digit long in decimal */) {
+if (port_len > 5 /* MAX port length is 5digit long in decimal */) {
error(URL_PARSER_LOG_PREFIX "port # is too long");
return 1;
}

@@ -34,7 +34,7 @@ extern netdata_mutex_t legacy_aclk_shared_state_mutex;
#define ACLK_IS_HOST_POPCORNING(host) (ACLK_IS_HOST_INITIALIZING(host) && host->aclk_state.t_last_popcorn_update)

extern struct legacy_aclk_shared_state {
-// optimization to avoid looping trough hosts
+// optimization to avoid looping through hosts
// every time Query Thread wakes up
RRDHOST *next_popcorn_host;

@@ -58,7 +58,7 @@ struct aclk_lws_wss_engine_instance {
struct lws_wss_packet_buffer *write_buffer_head;
struct lws_ring *read_ringbuffer;

-//flags to be readed by engine user
+//flags to be read by engine user
int websocket_connection_up;

// currently this is by default disabled

@@ -222,7 +222,7 @@ char *get_topic(char *sub_topic, char *final_topic, int max_size)
return final_topic;
}

-/* Avoids the need to scan trough all RRDHOSTS
+/* Avoids the need to scan through all RRDHOSTS
* every time any Query Thread Wakes Up
* (every time we need to check child popcorn expiry)
* call with legacy_aclk_shared_state_LOCK held

@@ -3355,7 +3355,7 @@ static void normalize_utilization(struct target *root) {
cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime);
}
else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) {
-// childrens resources are too high
+// children resources are too high
// lower only the children resources
utime_fix_ratio =
stime_fix_ratio =

@@ -4203,7 +4203,7 @@ void *cgroups_main(void *ptr) {

int error = uv_thread_create(&discovery_thread.thread, cgroup_discovery_worker, NULL);
if (error) {
-error("CGROUP: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
+error("CGROUP: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error));
goto exit;
}
uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]");

@@ -92,7 +92,7 @@ EOF
# _update is called continuously, to collect the values
ap_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

# do all the work to collect / calculate the values
# for each dimension

@@ -118,7 +118,7 @@ EOF

apcupsd_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

# do all the work to collect / calculate the values
# for each dimension

@@ -103,7 +103,7 @@ EOF
# _update is called continuously, to collect the values
example_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

example_get || return 1


@@ -173,7 +173,7 @@ VALUESEOF
# _update is called continuously, to collect the values
libreswan_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

libreswan_get || return 1
libreswan_now=$(date +%s)

@@ -129,7 +129,7 @@ EOF2

nut_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

# do all the work to collect / calculate the values
# for each dimension

@@ -147,7 +147,7 @@ EOF

opensips_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

# do all the work to collect / calculate the values
# for each dimension

@@ -158,7 +158,7 @@ opensips_update() {
# local opensips_client_http_ then one or more of these a-z 0-9 _ then = and one of more of 0-9
# local opensips_server_all_ then one or more of these a-z 0-9 _ then = and one of more of 0-9
# 4. then execute this as a script with the eval
-# be very carefull with eval:
+# be very careful with eval:
# prepare the script and always grep at the end the lines that are useful, so that
# even if something goes wrong, no other code can be executed

@@ -237,7 +237,7 @@ sensors_create() {
# _update is called continuously, to collect the values
sensors_update() {
# the first argument to this function is the microseconds since last update
-# pass this parameter to the BEGIN statement (see bellow).
+# pass this parameter to the BEGIN statement (see below).

# do all the work to collect / calculate the values
# for each dimension

@@ -357,7 +357,7 @@ following functions:
single write operation using a group of buffers rather than 1).
- `vfs_read`: Function used for monitoring the number of successful & failed
filesystem read calls, as well as the total number of read bytes.
-- `vfs_readv` Same function as `vfs_read` but for vector reads (i.e. a singe
+- `vfs_readv` Same function as `vfs_read` but for vector reads (i.e. a single
read operation using a group of buffers rather than 1).
- `vfs_unlink`: Function used for monitoring the number of successful & failed
filesystem unlink calls.

@@ -589,8 +589,8 @@ Linux metrics:
- Number of pages brought from disk. (`cachestat_misses`)
- directory cache
- Ratio of files available in directory cache. (`dc_hit_ratio`)
-- Number of files acessed. (`dc_reference`)
-- Number of files acessed that were not in cache. (`dc_not_cache`)
+- Number of files accessed. (`dc_reference`)
+- Number of files accessed that were not in cache. (`dc_not_cache`)
- Number of files not found. (`dc_not_found`)
- ipc shm
- Number of calls to `shm_get`. (`shmget_call`)

@@ -355,7 +355,7 @@ void write_chart_dimension(char *dim, long long value)
* @param move the pointer with the values that will be published
* @param end the number of values that will be written on standard output
*
-* @return It returns a variable tha maps the charts that did not have zero values.
+* @return It returns a variable that maps the charts that did not have zero values.
*/
void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
{

@@ -424,7 +424,7 @@ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long
* @param dread the dimension name
* @param vread the value for previous dimension
*
-* @return It returns a variable tha maps the charts that did not have zero values.
+* @return It returns a variable that maps the charts that did not have zero values.
*/
void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread)
{

@@ -599,7 +599,7 @@ void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family
* @param dimensions dimension values.
* @param end number of bins that will be sent to Netdata.
*
-* @return It returns a variable tha maps the charts that did not have zero values.
+* @return It returns a variable that maps the charts that did not have zero values.
*/
void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end)
{

@@ -917,7 +917,7 @@ uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps)

/*****************************************************************
*
-* AUXILIAR FUNCTIONS USED DURING INITIALIZATION
+* AUXILIARY FUNCTIONS USED DURING INITIALIZATION
*
*****************************************************************/


@@ -116,9 +116,9 @@ int am_i_running_as_root()
/**
* Reset the target values
*
-* @param root the pointer to the chain that will be reseted.
+* @param root the pointer to the chain that will be reset.
*
-* @return it returns the number of structures that was reseted.
+* @return it returns the number of structures that was reset.
*/
size_t zero_all_targets(struct target *root)
{

@@ -949,7 +949,7 @@ void cleanup_variables_from_other_threads(uint32_t pid)
socket_bandwidth_curr[pid] = NULL;
}

-// Clean cachestat strcture
+// Clean cachestat structure
if (cachestat_pid) {
freez(cachestat_pid[pid]);
cachestat_pid[pid] = NULL;

@@ -111,7 +111,7 @@ static void ebpf_cachestat_cleanup(void *ptr)
*
* Update publish values before to write dimension.
*
-* @param out strcuture that will receive data.
+* @param out structure that will receive data.
* @param mpa calls for mark_page_accessed during the last second.
* @param mbd calls for mark_buffer_dirty during the last second.
* @param apcl calls for add_to_page_cache_lru during the last second.

@@ -481,7 +481,7 @@ void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/

@@ -784,7 +784,7 @@ static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param update_every value to overwrite the update frequency set by the server.
*/

@@ -60,7 +60,7 @@ static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_look
*
* Update publish values before to write dimension.
*
-* @param out strcuture that will receive data.
+* @param out structure that will receive data.
* @param cache_access number of access to directory cache.
* @param not_found number of files not found on the file system
*/

@@ -404,7 +404,7 @@ void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct pid_on_targe
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/

@@ -782,7 +782,7 @@ static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param update_every value to overwrite the update frequency set by the server.
*/

@@ -103,7 +103,7 @@ static void ebpf_fd_cleanup(void *ptr)
*****************************************************************/

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/

@@ -320,7 +320,7 @@ static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
* @param root the target list.

@@ -609,7 +609,7 @@ static int ebpf_send_systemd_fd_charts(ebpf_module_t *em)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the main collector structure
*/

@@ -124,7 +124,7 @@ void *ebpf_mount_read_hash(void *ptr)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*/
static void ebpf_mount_send_data()
{

@@ -199,7 +199,7 @@ static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param update_every value to overwrite the update frequency set by the server.
*/

@@ -114,7 +114,7 @@ static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/

@@ -185,7 +185,7 @@ void ebpf_process_remove_pids()
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/

@@ -848,7 +848,7 @@ static int ebpf_send_systemd_process_charts(ebpf_module_t *em)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/

@@ -309,7 +309,7 @@ static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/

@@ -599,7 +599,7 @@ static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *value
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param update_every value to overwrite the update frequency set by the server.
*/

@@ -294,7 +294,7 @@ static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/

@@ -304,7 +304,7 @@ static void ebpf_socket_send_data(ebpf_module_t *em)
netdata_publish_vfs_common_t common_udp;
ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data);

-// We read bytes from function arguments, but bandiwdth is given in bits,
+// We read bytes from function arguments, but bandwidth is given in bits,
// so we need to multiply by 8 to convert for the final value.
write_count_chart(NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 3);
write_io_chart(NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY, socket_id_names[0],

@@ -353,7 +353,7 @@ long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t off
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
* @param root the target list.

@@ -2047,7 +2047,7 @@ void ebpf_socket_update_cgroup_algorithm()
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param update_every value to overwrite the update frequency set by the server.
*/

@@ -299,7 +299,7 @@ static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct pid_on_targe
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/

@@ -480,7 +480,7 @@ static void ebpf_create_systemd_swap_charts(int update_every)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param update_every value to overwrite the update frequency set by the server.
*/

@@ -103,7 +103,7 @@ static void ebpf_vfs_cleanup(void *ptr)
*****************************************************************/

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/

@@ -270,7 +270,7 @@ static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target *
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
* @param root the target list.

@@ -1122,7 +1122,7 @@ static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em)
}

/**
-* Send data to Netdata calling auxiliar functions.
+* Send data to Netdata calling auxiliary functions.
*
* @param em the main collector structure
*/

@@ -233,7 +233,7 @@ var named = {
x = keys[len];

// we maintain an index of the values found
-// mapping them to objects splitted
+// mapping them to objects split

look = named.lookups.nsstats[x];
if(typeof look === 'undefined') {

@@ -418,7 +418,7 @@ var named = {
var y = ykeys[ylen];

// we maintain an index of the values found
-// mapping them to objects splitted
+// mapping them to objects split

look = named.lookups.resolver_stats[y];
if(typeof look === 'undefined') {

@@ -553,7 +553,7 @@ Each port will have its counters metrics monitored, grouped in the following cha

- **Errors Statistics**
Many errors counters are provided, presenting statistics for:
-- Packets: malformated, sent/received discarded by card/switch, missing ressource
+- Packets: malformed, sent/received discarded by card/switch, missing resource
- Link: downed, recovered, integrity error, minor error
- Other events: Tick Wait to send, buffer overrun


@@ -979,7 +979,7 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, NULL
, d->chart_family
, "net.carrier"
-, "Inteface Physical Link State"
+, "Interface Physical Link State"
, "state"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETDEV_NAME

@@ -139,7 +139,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) {
return 1;
}

-// 4th line is the "Free pages count per migrate type at order". Just substract these 8 words.
+// 4th line is the "Free pages count per migrate type at order". Just subtract these 8 words.
pageorders_cnt = procfile_linewords(ff, 3);
if (pageorders_cnt < 9) {
error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse Line 4 of %s", ff_path);

@@ -37,7 +37,7 @@
GEN(port_rcv_constraint_errors, errors, "Pkts rcvd discarded ", 1, __VA_ARGS__) \
GEN(port_xmit_discards, errors, "Pkts sent discarded", 1, __VA_ARGS__) \
GEN(port_xmit_wait, errors, "Tick Wait to send", 1, __VA_ARGS__) \
-GEN(VL15_dropped, errors, "Pkts missed ressource", 1, __VA_ARGS__) \
+GEN(VL15_dropped, errors, "Pkts missed resource", 1, __VA_ARGS__) \
GEN(excessive_buffer_overrun_errors, errors, "Buffer overrun", 1, __VA_ARGS__) \
GEN(link_downed, errors, "Link Downed", 1, __VA_ARGS__) \
GEN(link_error_recovery, errors, "Link recovered", 1, __VA_ARGS__) \

@@ -188,7 +188,7 @@ class Service(SimpleService):
self.custom_model_scalers[model] = MinMaxScaler()

def reinitialize(self):
-"""Reinitialize charts, models and data to a begining state.
+"""Reinitialize charts, models and data to a beginning state.
"""
self.charts_init()
self.custom_models_init()

@@ -385,7 +385,7 @@ class Service(SimpleService):

def get_data(self):

-# initialize to whats available right now
+# initialize to what's available right now
if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0:
self.charts_init()
self.custom_models_init()

@@ -12,8 +12,8 @@ on your Netdata charts and/or dimensions.

Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a
changepoint score for each chart or dimension you configure it to work on. This is
-an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithim so there is no batch step
-to train the model, instead it evolves over time as more data arrives. That makes this particualr algorithim quite cheap
+an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step
+to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap
to compute at each step of data collection (see the notes section below for more details) and it should scale fairly
well to work on lots of charts or hosts (if running on a parent node for example).


@@ -28,7 +28,7 @@ Two charts are available:
This chart shows the percentile of the score that is output from the ChangeFinder library (it is turned off by default
but available with `show_scores: true`).

-A high observed score is more likley to be a valid changepoint worth exploring, even more so when multiple charts or
+A high observed score is more likely to be a valid changepoint worth exploring, even more so when multiple charts or
dimensions have high changepoint scores at the same time or very close together.

### ChangeFinder Flags (`changefinder.flags`)

@@ -36,11 +36,11 @@ dimensions have high changepoint scores at the same time or very close together.
This chart shows `1` or `0` if the latest score has a percentile value that exceeds the `cf_threshold` threshold. By
default, any scores that are in the 99th or above percentile will raise a flag on this chart.

-The raw changefinder score itself can be a little noisey and so limiting ourselves to just periods where it surpasses
+The raw changefinder score itself can be a little noisy and so limiting ourselves to just periods where it surpasses
the 99th percentile can help manage the "[signal to noise ratio](https://en.wikipedia.org/wiki/Signal-to-noise_ratio)"
better.

-The `cf_threshold` paramater might be one you want to play around with to tune things specifically for the workloads on
+The `cf_threshold` parameter might be one you want to play around with to tune things specifically for the workloads on
your node and the specific charts you want to monitor. For example, maybe the 95th percentile might work better for you
than the 99th percentile.


@@ -164,7 +164,7 @@ sudo su -s /bin/bash netdata
- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into it's
typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
this is because it can take a while to build up a proper distribution of previous scores in over to convert the raw
-score returned by the ChangeFinder algorithim into a percentile based on the most recent `n_score_samples` that have
+score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
approaches which need some initial window of time before they can be useful.

@@ -237,7 +237,7 @@ class Service(UrlService):
gc_pauses = memstats['PauseNs']
try:
gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
-# no GC cycles have occured yet
+# no GC cycles have occurred yet
except ZeroDivisionError:
gc_pause_avg = 0


@@ -250,10 +250,10 @@ CHARTS = {
]
},
'cursors': {
-'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors',
+'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
'lines': [
-['cursor_total', 'openned', 'absolute', 1, 1],
+['cursor_total', 'opened', 'absolute', 1, 1],
['noTimeout', None, 'absolute', 1, 1],
['timedOut', None, 'incremental', 1, 1]
]

@@ -97,7 +97,7 @@
# the client (Netdata) is not considered local, unless it runs from inside
# the same container.
#
-# Superuser access is needed for theses charts:
+# Superuser access is needed for these charts:
# Write-Ahead Logs
# Archive Write-Ahead Logs
#

@@ -43,7 +43,7 @@ looking at first (for more background information on why 3 stddev
see [here](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule#:~:text=In%20the%20empirical%20sciences%20the,99.7%25%20probability%20as%20near%20certainty.))
.

-In the example below we basically took a sledge hammer to our system so its not suprising that lots of charts light up
+In the example below we basically took a sledge hammer to our system so its not surprising that lots of charts light up
after we run the stress command. In a more realistic setting you might just see a handful of charts with strange zscores
and that could be a good indication of where to look first.


@@ -101,9 +101,9 @@ information about each one and what it does.
host: '127.0.0.1:19999'
# What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
charts_regex: 'system\..*'
-# length of time to base calulcations off for mean and stddev
+# length of time to base calculations off for mean and stddev
train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore
-# offset preceeding latest data to ignore when calculating mean and stddev
+# offset preceding latest data to ignore when calculating mean and stddev
offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev
# recalculate the mean and stddev every n steps of the collector
train_every_n: 900 # recalculate mean and stddev every 15 minutes

@@ -114,11 +114,11 @@ z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscore
# set z_abs: 'true' to make all zscores be absolute values only.
z_abs: 'true'
# burn in period in which to initially calculate mean and stddev on every step
-burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or inital calculations fail to return
+burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return
# mode can be to get a zscore 'per_dim' or 'per_chart'
mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step
# per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'
-per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all dimensions but will maintain the sign. 'mean' will just average.
+per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average.
```

## Notes

@@ -128,7 +128,7 @@ per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all di
calls to the netdata rest api to get the required data for each chart when calculating the mean and stddev.
- It may take a few hours or so for the collector to 'settle' into it's typical behaviour in terms of the scores you
will see in the normal running of your system.
-- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore accross all the
+- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore across all the
dimensions on the underlying chart.
- If you set `mode: 'per_dim'` then you will see a zscore for each dimension on each chart as opposed to one per chart.
- As this collector does some calculations itself in python you may want to try it out first on a test or development

@@ -83,7 +83,7 @@ local:
# length of time to base calculations off for mean and stddev
train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore

-# offset preceeding latest data to ignore when calculating mean and stddev
+# offset preceding latest data to ignore when calculating mean and stddev
offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev

# recalculate the mean and stddev every n steps of the collector

@@ -99,10 +99,10 @@ local:
z_abs: 'true'

# burn in period in which to initially calculate mean and stddev on every step
-burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or inital calculations fail to return
+burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return

# mode can be to get a zscore 'per_dim' or 'per_chart'
mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step

# per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'
-per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all dimensions but will maintain the sign. 'mean' will just average.
+per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average.

@@ -21,7 +21,7 @@ Netdata statsd is fast. It can collect more than **1.200.000 metrics per second*

# Available StatsD collectors

-Netdata ships with collectors implemented using the StatsD collector. They are configuration files (as you will read bellow), but they function as a collector, in the sense that configuration file organize the metrics of a data source into pre-defined charts.
+Netdata ships with collectors implemented using the StatsD collector. They are configuration files (as you will read below), but they function as a collector, in the sense that configuration file organize the metrics of a data source into pre-defined charts.

On these charts, we can have alarms as with any metric and chart.


@@ -64,7 +64,7 @@ Netdata fully supports the StatsD protocol. All StatsD client libraries can be u
- Timers use `|ms`
- Histograms use `|h`

-The only difference between the two, is the `units` of the charts, as timers report *miliseconds*.
+The only difference between the two, is the `units` of the charts, as timers report *milliseconds*.

[Sampling rate](#sampling-rates) is supported.


@@ -102,7 +102,7 @@ When sending multiple packets over UDP, it is important not to exceed the networ

Netdata will accept UDP packets up to 9000 bytes, but the underlying network will not exceed MTU.

-> You can read more about the network maxium transmission unit(MTU) in this cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/).
+> You can read more about the network maximum transmission unit(MTU) in this cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/).

## Configuration


@@ -199,7 +199,7 @@ AC_ARG_ENABLE(

# -----------------------------------------------------------------------------
# Enforce building with C99, bail early if we can't.
-test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata rquires a compiler that supports C99 to build])
+test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata requires a compiler that supports C99 to build])

# -----------------------------------------------------------------------------
# Check if cloud is enabled and if the functionality is available

@@ -823,7 +823,7 @@ if test "$enable_cloud" != "no" -a "$aclk_ng" != "no"; then
AC_MSG_CHECKING([ACLK Next Generation can support New Cloud protocol])
AC_MSG_RESULT([${can_build_new_cloud_protocol}])
if test "$new_cloud_protocol" = "yes" -a "$can_build_new_cloud_protocol" != "yes"; then
-AC_MSG_ERROR([Requested new cloud protocol support but it cant be build])
+AC_MSG_ERROR([Requested new cloud protocol support but it can't be build])
fi
if test "$can_build_new_cloud_protocol" = "yes"; then
new_cloud_protocol="yes"

@@ -1225,7 +1225,7 @@ fi
# -----------------------------------------------------------------------------
# ml - anomaly detection

-# Check if uuid is availabe. Fail if ML was explicitly requested.
+# Check if uuid is available. Fail if ML was explicitly requested.
if test "${enable_ml}" = "yes" -a "${have_uuid}" != "yes"; then
AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but libuuid can not be found."])
fi

@@ -239,7 +239,7 @@ void analytics_mirrored_hosts(void)
void analytics_exporters(void)
{
//when no exporters are available, an empty string will be sent
-//decide if something else is more suitable (but propably not null)
+//decide if something else is more suitable (but probably not null)
BUFFER *bi = buffer_create(1000);
analytics_exporting_connectors(bi);
analytics_set_data_str(&analytics_data.netdata_exporting_connectors, (char *)buffer_tostring(bi));

@@ -679,12 +679,12 @@ restart_after_removal:

int rrd_init(char *hostname, struct rrdhost_system_info *system_info) {
rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time);
-// Current chart locking and invalidation scheme doesn't prevent Netdata from segmentaion faults if a short
+// Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short
// cleanup delay is set. Extensive stress tests showed that 10 seconds is quite a safe delay. Look at
// https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information.
if (rrdset_free_obsolete_time < 10) {
rrdset_free_obsolete_time = 10;
-info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds. A lower delay can potentially cause a segmentaion fault.");
+info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds. A lower delay can potentially cause a segmentation fault.");
}
gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above);
if (gap_when_lost_iterations_above < 1)

@@ -14,7 +14,7 @@ $HTTP["url"] =~ "^/netdata/" {
}
```

-If you have older lighttpd you have to use a chain (such as bellow), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another).
+If you have older lighttpd you have to use a chain (such as below), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another).

```txt
$HTTP["url"] =~ "^/netdata/" {

@@ -39,7 +39,7 @@ Some caveats and tips to keep in mind:

- Only metrics in the export timeframe are available to you. If you zoom out or pan through time, you'll see the
beginning and end of the snapshot.
-- Charts won't update with new inforamtion, as you're looking at a static replica, not the live dashboard.
+- Charts won't update with new information, as you're looking at a static replica, not the live dashboard.
- The import is only temporary. Reload your browser tab to return to your node's real-time dashboard.

## Export a snapshot

@@ -13,7 +13,7 @@ maximum granularity using Netdata. Collect more than 50 unique metrics and put t
designed for better visual anomaly detection.

Netdata itself uses CockroachDB as part of its Netdata Cloud infrastructure, so we're happy to introduce this new
-collector and help others get started with it straightaway.
+collector and help others get started with it straight away.

Let's dive in and walk through the process of monitoring CockroachDB metrics with Netdata.


@@ -123,7 +123,7 @@ configure the collector to monitor charts from the
log](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) collectors.

`charts_regex` allows for some basic regex, such as wildcards (`*`) to match all contexts with a certain pattern. For
-example, `system\..*` matches with any chart wit ha context that begins with `system.`, and ends in any number of other
+example, `system\..*` matches with any chart with a context that begins with `system.`, and ends in any number of other
characters (`.*`). Note the escape character (`\`) around the first period to capture a period character exactly, and
not any character.


@@ -111,7 +111,7 @@ Find more details about family and context in our [documentation](/web/README.md
Now, having decided on how we are going to group the charts, we need to define how we are going to group metrics into different charts. This is particularly important, since we decide:

- What metrics **not** to show, since they are not useful for our use-case.
-- What metrics to consolidate into the same charts, so as to reduce noice and increase visual correlation.
+- What metrics to consolidate into the same charts, so as to reduce noise and increase visual correlation.

The dimension option has this syntax: `dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS`


@@ -24,7 +24,7 @@ prebuilt method for collecting your required metric data.

In this tutorial, you'll learn how to leverage the [Python programming language](https://www.python.org/) to build a
custom data collector for the Netdata Agent. Follow along with your own dataset, using the techniques and best practices
-covered here, or use the included examples for collecting and organizing eithre random or weather data.
+covered here, or use the included examples for collecting and organizing either random or weather data.

## What you need to get started


@@ -48,7 +48,7 @@ The basic elements of a Netdata collector are:
- `ORDER[]`: A list containing the charts to be displayed.
- `CHARTS{}`: A dictionary containing the details for the charts to be displayed.
- `data{}`: A dictionary containing the values to be displayed.
-- `get_data()`: The basic function of the plugin which will retrun to Netdata the correct values.
+- `get_data()`: The basic function of the plugin which will return to Netdata the correct values.

Let's walk through these jobs and elements as independent elements first, then apply them to example Python code.


@@ -138,7 +138,7 @@ correct values.
The `python.d` plugin has a number of framework classes that can be used to speed up the development of your python
collector. Your class can inherit one of these framework classes, which have preconfigured methods.

-For example, the snippet bellow is from the [RabbitMQ
+For example, the snippet below is from the [RabbitMQ
collector](https://github.com/netdata/netdata/blob/91f3268e9615edd393bd43de4ad8068111024cc9/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py#L273).
This collector uses an HTTP endpoint and uses the `UrlService` framework class, which only needs to define an HTTP
endpoint for data collection.

@@ -298,7 +298,7 @@ class Service(SimpleService):
def get_data(self):
#The data dict is basically all the values to be represented
# The entries are in the format: { "dimension": value}
-#And each "dimension" shoudl belong to a chart.
+#And each "dimension" should belong to a chart.
data = dict()

self.populate_data()

@@ -356,7 +356,7 @@ chart:
Next, time to add one more chart that visualizes the average, minimum, and maximum temperature values.

Add a new entry in the `CHARTS` dictionary with the definition for the new chart. Since you want three values
-represented in this this chart, add three dimensions. You shoudl also use the same `FAMILY` value in the charts (`TEMP`)
+represented in this this chart, add three dimensions. You should also use the same `FAMILY` value in the charts (`TEMP`)
so that those two charts are grouped together.

```python

@@ -418,7 +418,7 @@ configuration in [YAML](https://www.tutorialspoint.com/yaml/yaml_basics.htm) for
- Create a configuration file in the same directory as the `<plugin_name>.chart.py`. Name it `<plugin_name>.conf`.
- Define a `job`, which is an instance of the collector. It is useful when you want to collect data from different
sources with different attributes. For example, we could gather data from 2 different weather stations, which use
-different temperature measures: Fahrenheit and Celcius.
+different temperature measures: Fahrenheit and Celsius.
- You can define many different jobs with the same name, but with different attributes. Netdata will try each job
serially and will stop at the first job that returns data. If multiple jobs have the same name, only one of them can
run. This enables you to define different "ways" to fetch data from a particular data source so that the collector has

@@ -92,7 +92,7 @@ int init_connectors(struct engine *engine)
// dispatch the instance worker thread
int error = uv_thread_create(&instance->thread, instance->worker, instance);
if (error) {
-error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
+error("EXPORTING: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error));
return 1;
}
char threadname[NETDATA_THREAD_NAME_MAX + 1];

@@ -136,7 +136,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
* Copy and sanitize name.
*
* @param d a destination string.
-* @param s a source sting.
+* @param s a source string.
* @param usable the number of characters to copy.
* @return Returns the length of the copied string.
*/

@@ -161,7 +161,7 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
* Copy and sanitize label.
*
* @param d a destination string.
-* @param s a source sting.
+* @param s a source string.
* @param usable the number of characters to copy.
* @return Returns the length of the copied string.
*/

@@ -190,7 +190,7 @@ inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
* Copy and sanitize units.
*
* @param d a destination string.
-* @param s a source sting.
+* @param s a source string.
* @param usable the number of characters to copy.
* @param showoldunits set this flag to 1 to show old (before v1.12) units.
* @return Returns the destination string.

@@ -177,7 +177,7 @@ type: Database
| Cgroups | Alerts for cpu and memory usage of control groups |
| Computing | Alerts for shared computing applications (e.g. boinc) |
| Containers | Container related alerts (e.g. docker instances) |
-| Database | Database systems (e.g. MySQL, Postgress, etc) |
+| Database | Database systems (e.g. MySQL, PostgreSQL, etc) |
| Data Sharing | Used to group together alerts for data sharing applications |
| DHCP | Alerts for dhcp related services |
| DNS | Alerts for dns related services |

@@ -1,4 +1,4 @@
-#chainhead_header is expected momenterarily to be ahead. If its considerably ahead (e.g more than 5 blocks), then the node is definetely out of sync.
+#chainhead_header is expected momenterarily to be ahead. If its considerably ahead (e.g more than 5 blocks), then the node is definitely out of sync.
template: geth_chainhead_diff_between_header_block
on: geth.chainhead
class: Workload

@@ -319,7 +319,7 @@ SLACK_WEBHOOK_URL=
# Microsoft Teams configs
MSTEAMS_WEBHOOK_URL=

-# Legacy Microsoft Teams configs for backwards compatability:
+# Legacy Microsoft Teams configs for backwards compatibility:
declare -A role_recipients_msteam

# rocketchat configs

@@ -17,7 +17,7 @@ netdata WARNING on hostname at Tue Apr 3 09:00:00 EDT 2018: disk_space._ out of

System log targets are configured as recipients in [`/etc/netdata/health_alarm_notify.conf`](https://github.com/netdata/netdata/blob/36bedc044584dea791fd29455bdcd287c3306cb2/conf.d/health_alarm_notify.conf#L534) (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`).

-You can als configure per-role targets in the same file a bit further down.
+You can also configure per-role targets in the same file a bit further down.

Targets are defined as follows:


@@ -654,7 +654,7 @@ void ebpf_load_addresses(ebpf_addresses_t *fa, int fd)
*
* @param algorithms the output vector
* @param length number of elements of algorithms vector
-* @param algortihm algorithm used on charts.
+* @param algorithm algorithm used on charts.
*/
void ebpf_fill_algorithms(int *algorithms, size_t length, int algorithm)
{

@@ -183,7 +183,7 @@ static jsmnerr_t jsmn_parse_string(jsmn_parser *parser, const char *js,
*
* Parse JSON string and fill tokens.
*
-* @param parser the auxiliar vector used to parser
+* @param parser the auxiliary vector used to parser
* @param js the string to parse
* @param len the string length
* @param tokens the place to map the tokens

@@ -69,7 +69,7 @@ static inline pfwords *pfwords_new(void) {
}

static inline void pfwords_reset(pfwords *fw) {
-// debug(D_PROCFILE, PF_PREFIX ": reseting words");
+// debug(D_PROCFILE, PF_PREFIX ": resetting words");
fw->len = 0;
}


@@ -115,7 +115,7 @@ static inline pflines *pflines_new(void) {
}

static inline void pflines_reset(pflines *fl) {
-// debug(D_PROCFILE, PF_PREFIX ": reseting lines");
+// debug(D_PROCFILE, PF_PREFIX ": resetting lines");

fl->len = 0;
}

@@ -53,7 +53,7 @@ installation. Here are a few important parameters:
- `--no-updates`: Disable automatic updates.
- `--stable-channel`: Use a stable build instead of a nightly build.
- `--reinstall`: If an existing install is found, reinstall instead of trying to update it in place.
-- `--dont-start-it`: Don’t auto-start the daemon after installing. This parameter is not gauranteed to work.
+- `--dont-start-it`: Don’t auto-start the daemon after installing. This parameter is not guaranteed to work.
- `--install`: Specify an alternative install prefix.
- `--disable-cloud`: For local builds, don’t build any of the cloud code at all. For native packages and static builds,
use runtime configuration to disable cloud support.

@@ -120,7 +120,7 @@ you can do the following to prepare a copy for the build system:
4. Build JSON-C by running `make` in the JSON-C source directory.
5. In the Netdata source directory, create a directory called
`externaldeps/jsonc`.
-6. Copy `libjson-c.a` fro the JSON-C source directory to
+6. Copy `libjson-c.a` from the JSON-C source directory to
`externaldeps/jsonc/libjson-c.a` in the Netdata source tree.
7. Copy all of the header files (`*.h`) from the JSON-C source directory
to `externaldeps/jsonc/json-c` in the Netdata source tree.

@@ -21,7 +21,7 @@ will install the content into `/opt/netdata`, making future removal safe and sim
When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other
installations run it as the `netdata` user, you might wish to do the same. This requires some extra work:

-1. Creat a group `netdata` via the Synology group interface. Give it no access to anything.
+1. Create a group `netdata` via the Synology group interface. Give it no access to anything.
2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign
the user to the `netdata` group. Netdata will chuid to this user when running.
3. Change ownership of the following directories, as defined in [Netdata

@@ -7,7 +7,7 @@
# Utility to create self-extracting tar.gz archives.
# The resulting archive is a file holding the tar.gz archive with
# a small Shell script stub that uncompresses the archive to a temporary
-# directory and then executes a given script from withing that directory.
+# directory and then executes a given script from within that directory.
#
# Makeself home page: http://makeself.io/
#

@@ -17,7 +17,7 @@ extern size_t procfile_max_allocation;


static inline void pflines_reset(pflines *fl) {
-// debug(D_PROCFILE, PF_PREFIX ": reseting lines");
+// debug(D_PROCFILE, PF_PREFIX ": resetting lines");

fl->len = 0;
}

@@ -29,7 +29,7 @@ static inline void pflines_free(pflines *fl) {
}

static inline void pfwords_reset(pfwords *fw) {
-// debug(D_PROCFILE, PF_PREFIX ": reseting words");
+// debug(D_PROCFILE, PF_PREFIX ": resetting words");
fw->len = 0;
}


@@ -410,7 +410,7 @@ static void empty_url(void **state)
}

/* If the %-escape is being performed at the correct time then the url should not be treated as a query, but instead
-as a path "/api/v1/info?blah?" which should despatch into the API with the given values.
+as a path "/api/v1/info?blah?" which should dispatch into the API with the given values.
*/
static void not_a_query(void **state)
{

@@ -79,7 +79,7 @@ static struct web_client *web_client_alloc(void) {
// Comments per server:
// SINGLE-THREADED : 1 cache is maintained
// MULTI-THREADED : 1 cache is maintained
-// STATIC-THREADED : 1 cache for each thred of the web server
+// STATIC-THREADED : 1 cache for each thread of the web server

__thread struct clients_cache web_clients_cache = {
.pid = 0,