0
0
Fork 0
mirror of https://github.com/netdata/netdata.git synced 2025-04-14 09:38:34 +00:00

Revert changes since v1.21 in preparation for hotfix release.

This commit is contained in:
Austin S. Hemmelgarn 2020-04-13 08:39:52 -04:00
parent 353780082c
commit e2874320fc
No known key found for this signature in database
GPG key ID: 4138B50D9B8289C6
56 changed files with 442 additions and 1647 deletions

View file

@ -1112,8 +1112,6 @@ endif()
-Wl,--wrap=recv
-Wl,--wrap=send
-Wl,--wrap=connect_to_one_of
-Wl,--wrap=create_main_rusage_chart
-Wl,--wrap=send_main_rusage
${PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS}
${KINESIS_LINK_OPTIONS}
${MONGODB_LINK_OPTIONS}

View file

@ -573,7 +573,6 @@ NETDATA_FILES = \
$(LIBNETDATA_FILES) \
$(API_PLUGIN_FILES) \
$(BACKENDS_PLUGIN_FILES) \
$(EXPORTING_ENGINE_FILES) \
$(CHECKS_PLUGIN_FILES) \
$(HEALTH_PLUGIN_FILES) \
$(IDLEJITTER_PLUGIN_FILES) \
@ -609,6 +608,12 @@ if LINUX
endif
if ENABLE_EXPORTING
NETDATA_FILES += \
$(EXPORTING_ENGINE_FILES) \
$(NULL)
endif
NETDATA_COMMON_LIBS = \
$(OPTIONAL_MATH_LIBS) \
$(OPTIONAL_ZLIB_LIBS) \
@ -740,13 +745,23 @@ if ENABLE_PLUGIN_SLABINFO
$(NULL)
endif
if ENABLE_EXPORTING
if ENABLE_BACKEND_KINESIS
netdata_SOURCES += $(KINESIS_BACKEND_FILES) $(KINESIS_EXPORTING_FILES)
netdata_SOURCES += $(KINESIS_EXPORTING_FILES)
netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
endif
endif
if ENABLE_BACKEND_KINESIS
netdata_SOURCES += $(KINESIS_BACKEND_FILES)
netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
endif
if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
if ENABLE_EXPORTING
netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
endif
netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES)
netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
BUILT_SOURCES = \
exporting/prometheus/remote_write/remote_write.pb.cc \
@ -760,8 +775,15 @@ exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote
endif
if ENABLE_EXPORTING
if ENABLE_BACKEND_MONGODB
netdata_SOURCES += $(MONGODB_BACKEND_FILES) $(MONGODB_EXPORTING_FILES)
netdata_SOURCES += $(MONGODB_EXPORTING_FILES)
netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
endif
endif
if ENABLE_BACKEND_MONGODB
netdata_SOURCES += $(MONGODB_BACKEND_FILES)
netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
endif
@ -873,8 +895,6 @@ if ENABLE_UNITTESTS
-Wl,--wrap=recv \
-Wl,--wrap=send \
-Wl,--wrap=connect_to_one_of \
-Wl,--wrap=create_main_rusage_chart \
-Wl,--wrap=send_main_rusage \
$(TEST_LDFLAGS) \
$(NULL)
exporting_tests_exporting_engine_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)

View file

@ -1,7 +1,6 @@
<!--
---
title: "Netdata"
date: 2020-04-06
custom_edit_url: https://github.com/netdata/netdata/edit/master/README.md
---
-->
@ -34,7 +33,7 @@ granularity. Run this long-term storage autonomously, or integrate Netdata with
Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** and **virtual** servers,
**containers**, **IoT** devices), without disrupting their core function.
Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **macOS**, along with
Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **MacOS**, along with
other systems derived from them, such as **Kubernetes** and **Docker**.
Netdata is not hosted by the CNCF but is the 3rd most starred open-source project in the [Cloud Native Computing

View file

@ -23,8 +23,6 @@ static char *aclk_password = NULL;
static char *global_base_topic = NULL;
static int aclk_connecting = 0;
int aclk_connected = 0; // Exposed in the web-api
usec_t aclk_session_us = 0; // Used by the mqtt layer
time_t aclk_session_sec = 0; // Used by the mqtt layer
static netdata_mutex_t aclk_mutex = NETDATA_MUTEX_INITIALIZER;
static netdata_mutex_t query_mutex = NETDATA_MUTEX_INITIALIZER;
@ -187,7 +185,7 @@ biofailed:
* should be called with
*
* mode 0 to reset the delay
* mode 1 to calculate sleep time [0 .. ACLK_MAX_BACKOFF_DELAY * 1000] ms
* mode 1 to sleep for the calculated amount of time [0 .. ACLK_MAX_BACKOFF_DELAY * 1000] ms
*
*/
unsigned long int aclk_reconnect_delay(int mode)
@ -210,6 +208,8 @@ unsigned long int aclk_reconnect_delay(int mode)
delay = (delay * 1000) + (random() % 1000);
}
// sleep_usec(USEC_PER_MS * delay);
return delay;
}
@ -306,7 +306,7 @@ int aclk_queue_query(char *topic, char *data, char *msg_id, char *query, int run
if (tmp_query->run_after == run_after) {
QUERY_UNLOCK;
QUERY_THREAD_WAKEUP;
return 0;
return 1;
}
if (last_query)
@ -750,8 +750,8 @@ int aclk_execute_query(struct aclk_query *this_query)
buffer_flush(local_buffer);
local_buffer->contenttype = CT_APPLICATION_JSON;
aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0);
buffer_strcat(local_buffer, ",\n\t\"payload\": ");
aclk_create_header(local_buffer, "http", this_query->msg_id);
char *encoded_response = aclk_encode_response(w->response.data);
buffer_sprintf(
@ -821,6 +821,11 @@ int aclk_process_query()
aclk_send_message(this_query->topic, this_query->query, this_query->msg_id);
break;
case ACLK_CMD_ALARMS:
debug(D_ACLK, "EXECUTING an alarms update command");
aclk_send_alarm_metadata();
break;
case ACLK_CMD_CLOUD:
debug(D_ACLK, "EXECUTING a cloud command");
aclk_execute_query(this_query);
@ -863,22 +868,18 @@ int aclk_process_queries()
static void aclk_query_thread_cleanup(void *ptr)
{
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
info("cleaning up...");
COLLECTOR_LOCK;
_reset_collector_list();
freez(collector_list);
// Clean memory for pending queries if any
struct aclk_query *this_query;
COLLECTOR_UNLOCK;
do {
this_query = aclk_queue_pop();
aclk_query_free(this_query);
} while (this_query);
freez(static_thread->thread);
freez(static_thread);
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
/**
@ -915,7 +916,7 @@ void *aclk_query_main_thread(void *ptr)
if (unlikely(aclk_queue_query("on_connect", NULL, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) {
errno = 0;
error("ACLK failed to queue on_connect command");
aclk_metadata_submitted = ACLK_METADATA_REQUIRED;
aclk_metadata_submitted = 0;
}
}
@ -938,6 +939,7 @@ void *aclk_query_main_thread(void *ptr)
// Thread cleanup
static void aclk_main_cleanup(void *ptr)
{
char payload[512];
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
@ -950,11 +952,24 @@ static void aclk_main_cleanup(void *ptr)
// Wakeup thread to cleanup
QUERY_THREAD_WAKEUP;
// Send a graceful disconnect message
BUFFER *b = buffer_create(512);
aclk_create_header(b, "disconnect", NULL, 0, 0);
buffer_strcat(b, ",\n\t\"payload\": \"graceful\"}\n");
aclk_send_message(ACLK_METADATA_TOPIC, (char*)buffer_tostring(b), NULL);
buffer_free(b);
char *msg_id = create_uuid();
usec_t time_created_offset_usec = now_realtime_usec();
time_t time_created = time_created_offset_usec / USEC_PER_SEC;
time_created_offset_usec = time_created_offset_usec % USEC_PER_SEC;
snprintfz(
payload, 511,
"{ \"type\": \"disconnect\","
" \"msg-id\": \"%s\","
" \"timestamp\": %ld,"
" \"timestamp-offset-usec\": %llu,"
" \"version\": %d,"
" \"payload\": \"graceful\" }",
msg_id, time_created, time_created_offset_usec, ACLK_VERSION);
aclk_send_message(ACLK_METADATA_TOPIC, payload, msg_id);
freez(msg_id);
event_loop_timeout = now_realtime_sec() + 5;
write_q = 1;
@ -975,6 +990,7 @@ static void aclk_main_cleanup(void *ptr)
}
}
info("Disconnected");
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
@ -1279,6 +1295,7 @@ void *aclk_main(void *ptr)
{
struct netdata_static_thread *query_thread;
netdata_thread_cleanup_push(aclk_main_cleanup, ptr);
if (!netdata_cloud_setting) {
info("Killing ACLK thread -> cloud functionality has been disabled");
return NULL;
@ -1318,11 +1335,10 @@ void *aclk_main(void *ptr)
sleep_usec(USEC_PER_SEC * 60);
}
create_publish_base_topic();
create_private_key();
usec_t reconnect_expiry = 0; // In usecs
netdata_thread_disable_cancelability();
while (!netdata_exit) {
static int first_init = 0;
size_t write_q, write_q_bytes, read_q;
@ -1376,8 +1392,7 @@ void *aclk_main(void *ptr)
}
} // forever
exited:
// Wakeup query thread to cleanup
QUERY_THREAD_WAKEUP;
aclk_shutdown();
freez(aclk_username);
freez(aclk_password);
@ -1386,7 +1401,7 @@ exited:
if (aclk_private_key != NULL)
RSA_free(aclk_private_key);
aclk_main_cleanup(ptr);
netdata_thread_cleanup_pop(1);
return NULL;
}
@ -1499,7 +1514,7 @@ void aclk_shutdown()
info("Shutdown complete");
}
inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us)
inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id)
{
uuid_t uuid;
char uuid_str[36 + 1];
@ -1510,11 +1525,9 @@ inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts
msg_id = uuid_str;
}
if (ts_secs == 0) {
ts_us = now_realtime_usec();
ts_secs = ts_us / USEC_PER_SEC;
ts_us = ts_us % USEC_PER_SEC;
}
usec_t time_created_offset_usec = now_realtime_usec();
time_t time_created = time_created_offset_usec / USEC_PER_SEC;
time_created_offset_usec = time_created_offset_usec % USEC_PER_SEC;
buffer_sprintf(
dest,
@ -1522,12 +1535,11 @@ inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts
"\t\"msg-id\": \"%s\",\n"
"\t\"timestamp\": %ld,\n"
"\t\"timestamp-offset-usec\": %llu,\n"
"\t\"connect\": %ld,\n"
"\t\"connect-offset-usec\": %llu,\n"
"\t\"version\": %d",
type, msg_id, ts_secs, ts_us, aclk_session_sec, aclk_session_us, ACLK_VERSION);
"\t\"version\": %d,\n"
"\t\"payload\": ",
type, msg_id, time_created, time_created_offset_usec, ACLK_VERSION);
debug(D_ACLK, "Sending v%d msgid [%s] type [%s] time [%ld]", ACLK_VERSION, msg_id, type, ts_secs);
debug(D_ACLK, "Sending v%d msgid [%s] type [%s] time [%ld]", ACLK_VERSION, msg_id, type, time_created);
}
/*
@ -1587,15 +1599,7 @@ void aclk_send_alarm_metadata()
debug(D_ACLK, "Metadata alarms start");
// on_connect messages are sent on a health reload, if the on_connect message is real then we
// use the session time as the fake timestamp to indicate that it starts the session. If it is
// a fake on_connect message then use the real timestamp to indicate it is within the existing
// session.
if (aclk_metadata_submitted == ACLK_METADATA_SENT)
aclk_create_header(local_buffer, "connect_alarms", msg_id, 0, 0);
else
aclk_create_header(local_buffer, "connect_alarms", msg_id, aclk_session_sec, aclk_session_us);
buffer_strcat(local_buffer, ",\n\t\"payload\": ");
aclk_create_header(local_buffer, "connect_alarms", msg_id);
buffer_sprintf(local_buffer, "{\n\t \"configured-alarms\" : ");
health_alarms2json(localhost, local_buffer, 1);
@ -1631,16 +1635,7 @@ int aclk_send_info_metadata()
buffer_flush(local_buffer);
local_buffer->contenttype = CT_APPLICATION_JSON;
// on_connect messages are sent on a health reload, if the on_connect message is real then we
// use the session time as the fake timestamp to indicate that it starts the session. If it is
// a fake on_connect message then use the real timestamp to indicate it is within the existing
// session.
if (aclk_metadata_submitted == ACLK_METADATA_SENT)
aclk_create_header(local_buffer, "connect", msg_id, 0, 0);
else
aclk_create_header(local_buffer, "connect", msg_id, aclk_session_sec, aclk_session_us);
buffer_strcat(local_buffer, ",\n\t\"payload\": ");
aclk_create_header(local_buffer, "connect", msg_id);
buffer_sprintf(local_buffer, "{\n\t \"info\" : ");
web_client_api_request_v1_info_fill_buffer(localhost, local_buffer);
debug(D_ACLK, "Metadata %s with info has %zu bytes", msg_id, local_buffer->len);
@ -1733,9 +1728,7 @@ int aclk_send_single_chart(char *hostname, char *chart)
buffer_flush(local_buffer);
local_buffer->contenttype = CT_APPLICATION_JSON;
aclk_create_header(local_buffer, "chart", msg_id, 0, 0);
buffer_strcat(local_buffer, ",\n\t\"payload\": ");
aclk_create_header(local_buffer, "chart", msg_id);
rrdset2json(st, local_buffer, NULL, NULL, 1);
buffer_sprintf(local_buffer, "\t\n}");
@ -1800,8 +1793,7 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae)
char *msg_id = create_uuid();
buffer_flush(local_buffer);
aclk_create_header(local_buffer, "status-change", msg_id, 0, 0);
buffer_strcat(local_buffer, ",\n\t\"payload\": ");
aclk_create_header(local_buffer, "status-change", msg_id);
netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
health_alarm_entry2json_nolock(local_buffer, ae, host);
@ -1871,12 +1863,6 @@ int aclk_handle_cloud_request(char *payload)
return 1;
}
// Checked to be "http", not needed anymore
if (likely(cloud_to_agent.type_id)) {
freez(cloud_to_agent.type_id);
cloud_to_agent.type_id = NULL;
}
if (unlikely(aclk_submit_request(&cloud_to_agent)))
debug(D_ACLK, "ACLK failed to queue incoming message (%s)", payload);

View file

@ -44,6 +44,7 @@ typedef enum aclk_cmd {
ACLK_CMD_CHART,
ACLK_CMD_CHARTDEL,
ACLK_CMD_ALARM,
ACLK_CMD_ALARMS,
ACLK_CMD_MAX
} ACLK_CMD;
@ -73,12 +74,16 @@ void *aclk_main(void *ptr);
extern int aclk_send_message(char *sub_topic, char *message, char *msg_id);
//int aclk_init();
//char *get_base_topic();
extern char *is_agent_claimed(void);
extern void aclk_lws_wss_mqtt_layer_disconect_notif();
char *create_uuid();
// callbacks for agent cloud link
int aclk_subscribe(char *topic, int qos);
void aclk_shutdown();
int cloud_to_agent_parse(JSON_ENTRY *e);
void aclk_disconnect();
void aclk_connect();
@ -93,7 +98,7 @@ struct aclk_query *
aclk_query_find(char *token, char *data, char *msg_id, char *query, ACLK_CMD cmd, struct aclk_query **last_query);
int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd);
int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae);
void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us);
void aclk_create_header(BUFFER *dest, char *type, char *msg_id);
int aclk_handle_cloud_request(char *payload);
int aclk_submit_request(struct aclk_request *);
void aclk_add_collector(const char *hostname, const char *plugin_name, const char *module_name);

View file

@ -5,9 +5,6 @@
#include "mqtt.h"
#include "aclk_lws_wss_client.h"
extern usec_t aclk_session_us;
extern time_t aclk_session_sec;
inline const char *_link_strerror(int rc)
{
return mosquitto_strerror(rc);
@ -52,12 +49,7 @@ void disconnect_callback(struct mosquitto *mosq, void *obj, int rc)
UNUSED(obj);
UNUSED(rc);
if (netdata_exit)
info("Connection to cloud terminated due to agent shutdown");
else {
errno = 0;
error("Connection to cloud failed");
}
info("Connection to cloud failed");
aclk_disconnect();
aclk_lws_wss_mqtt_layer_disconect_notif();
@ -139,11 +131,6 @@ static int _mqtt_create_connection(char *username, char *password)
return MOSQ_ERR_UNKNOWN;
}
// Record the session start time to allow a nominal LWT timestamp
usec_t now = now_realtime_usec();
aclk_session_sec = now / USEC_PER_SEC;
aclk_session_us = now % USEC_PER_SEC;
_link_set_lwt("outbound/meta", 2);
mosquitto_connect_callback_set(mosq, connect_callback);
@ -272,6 +259,7 @@ int _link_set_lwt(char *sub_topic, int qos)
{
int rc;
char topic[ACLK_MAX_TOPIC + 1];
char payload[512];
char *final_topic;
final_topic = get_topic(sub_topic, topic, ACLK_MAX_TOPIC);
@ -281,13 +269,25 @@ int _link_set_lwt(char *sub_topic, int qos)
return 1;
}
usec_t lwt_time = aclk_session_sec * USEC_PER_SEC + aclk_session_us + 1;
BUFFER *b = buffer_create(512);
aclk_create_header(b, "disconnect", NULL, lwt_time / USEC_PER_SEC, lwt_time % USEC_PER_SEC);
buffer_strcat(b, ", \"payload\": \"unexpected\" }");
rc = mosquitto_will_set(mosq, topic, buffer_strlen(b), buffer_tostring(b), qos, 0);
buffer_free(b);
usec_t time_created_offset_usec = now_realtime_usec();
time_t time_created = time_created_offset_usec / USEC_PER_SEC;
time_created_offset_usec = time_created_offset_usec % USEC_PER_SEC;
char *msg_id = create_uuid();
snprintfz(
payload, 511,
"{ \"type\": \"disconnect\","
" \"msg-id\": \"%s\","
" \"timestamp\": %ld,"
" \"timestamp-offset-usec\": %llu,"
" \"version\": %d,"
" \"payload\": \"unexpected\" }",
msg_id, time_created, time_created_offset_usec, ACLK_VERSION);
freez(msg_id);
rc = mosquitto_will_set(mosq, topic, strlen(payload), (const void *) payload, qos, 0);
return rc;
}

View file

@ -27,6 +27,10 @@ typedef enum backend_types {
BACKEND_TYPE_NUM // Number of backend types
} BACKEND_TYPE;
#ifdef ENABLE_EXPORTING
#include "exporting/exporting_engine.h"
#endif
typedef int (**backend_response_checker_t)(BUFFER *);
typedef int (**backend_request_formatter_t)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS);

View file

@ -44,7 +44,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
return 0;
}
static inline size_t backends_prometheus_name_copy(char *d, const char *s, size_t usable) {
static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) {
size_t n;
for(n = 0; *s && n < usable ; d++, s++, n++) {
@ -58,7 +58,7 @@ static inline size_t backends_prometheus_name_copy(char *d, const char *s, size_
return n;
}
static inline size_t backends_prometheus_label_copy(char *d, const char *s, size_t usable) {
static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) {
size_t n;
// make sure we can escape one character without overflowing the buffer
@ -78,7 +78,7 @@ static inline size_t backends_prometheus_label_copy(char *d, const char *s, size
return n;
}
static inline char *backends_prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
static inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
const char *sorig = s;
char *ret = d;
size_t n;
@ -194,7 +194,7 @@ static int print_host_variables(RRDVAR *rv, void *data) {
label_post = "}";
}
backends_prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
buffer_sprintf(opts->wb
@ -227,7 +227,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
rrdhost_rdlock(host);
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
backends_prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
char labels[PROMETHEUS_LABELS_MAX + 1] = "";
if(allhosts) {
@ -299,9 +299,9 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
char family[PROMETHEUS_ELEMENT_MAX + 1];
char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
backends_prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
if(likely(backends_can_send_rrdset(backend_options, st))) {
rrdset_rdlock(st);
@ -317,7 +317,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
}
else {
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE && !(output_options & BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS))
backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
}
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
@ -354,7 +354,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
// all the dimensions of the chart, has the same algorithm, multiplier and divisor
// we add all dimensions as labels
backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
buffer_sprintf(wb
@ -411,7 +411,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
// the dimensions of the chart, do not have the same algorithm, multiplier or divisor
// we create a metric per dimension
backends_prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
buffer_sprintf(wb
@ -480,7 +480,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
suffix = "_sum";
backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n"
@ -593,7 +593,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
, size_t *count_dims_skipped
) {
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
backends_prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
backends_add_host_info("netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
@ -620,9 +620,9 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
char family[PROMETHEUS_ELEMENT_MAX + 1];
char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
backends_prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
if(likely(backends_can_send_rrdset(backend_options, st))) {
rrdset_rdlock(st);
@ -640,7 +640,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
}
else {
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
}
// for each dimension
@ -664,7 +664,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
// all the dimensions of the chart, has the same algorithm, multiplier and divisor
// we add all dimensions as labels
backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", prefix, context, suffix);
backends_add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
@ -674,7 +674,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
// the dimensions of the chart, do not have the same algorithm, multiplier or divisor
// we create a metric per dimension
backends_prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", prefix, context, dimension, suffix);
backends_add_metric(name, chart, family, NULL, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
@ -694,7 +694,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
suffix = "_sum";
backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", prefix, context, units, suffix);
backends_add_metric(name, chart, family, dimension, hostname, value, last_t * MSEC_PER_SEC);

View file

@ -1,7 +1,6 @@
<!--
---
title: "External build-system"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/build_external/README.md
---
-->
@ -10,7 +9,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/build_external/R
This wraps the build-system in Docker so that the host system and the target system are
decoupled. This allows:
* Cross-compilation (e.g. linux development from macOS)
* Cross-compilation (e.g. linux development from MacOS)
* Cross-distro (e.g. using CentOS user-land while developing on Debian)
* Multi-host scenarios (e.g. master/slave configurations)
* Bleeding-edge scenarios (e.g. using the ACLK (**currently for internal-use only**))

View file

@ -1,7 +1,6 @@
<!--
---
title: "Supported collectors list"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/COLLECTORS.md
---
-->
@ -38,7 +37,7 @@ collector—we may be looking for contributions from users such as yourself!
| [diskspace.plugin](diskspace.plugin/README.md) | Linux | Collects disk space usage metrics on Linux mount points. |
| [freebsd.plugin](freebsd.plugin/README.md) | FreeBSD | Collects resource usage and performance data on FreeBSD systems. |
| [idlejitter.plugin](idlejitter.plugin/README.md) | any | Measures CPU latency and jitter on all operating systems. |
| [macos.plugin](macos.plugin/README.md) | macos | Collects resource usage and performance data on macOS systems. |
| [macos.plugin](macos.plugin/README.md) | macos | Collects resource usage and performance data on MacOS systems. |
| [proc.plugin](proc.plugin/README.md) | Linux | Collects resource usage and performance data on Linux systems. |
| [slabinfo.plugin](slabinfo.plugin/README.md) | Linux | Collects kernel SLAB details on Linux systems. |
| [statsd.plugin](statsd.plugin/README.md) | any | Implements a high performance `statsd` server for Netdata. |

View file

@ -96,7 +96,7 @@ fail2ban: fail2ban*
# -----------------------------------------------------------------------------
# web/ftp servers
httpd: apache* httpd nginx* lighttpd hiawatha
httpd: apache* httpd nginx* lighttpd
proxy: squid* c-icap squidGuard varnish*
php: php*
ftpd: proftpd in.tftpd vsftpd
@ -107,7 +107,7 @@ puma: *puma*
# -----------------------------------------------------------------------------
# database servers
sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
sql: mysqld* mariad* postgres* postmaster* oracle_* ora_*
nosql: mongod redis* memcached *couchdb*
timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd*
columndb: clickhouse-server*
@ -223,7 +223,7 @@ torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
# -----------------------------------------------------------------------------
# backup servers and clients
backup: rsync lsyncd bacula* borg
backup: rsync lsyncd bacula*
# -----------------------------------------------------------------------------
# cron
@ -239,7 +239,7 @@ ups: upsmon upsd */nut/*
# media players, servers, clients
media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
media: mpd minidlnad mt-daapd avahi* Plex* squeeze*
media: mpd minidlnad mt-daapd avahi* Plex*
# -----------------------------------------------------------------------------
# java applications

View file

@ -4110,6 +4110,8 @@ int main(int argc, char **argv) {
procfile_adaptive_initial_allocation = 1;
time_t started_t = now_monotonic_sec();
get_system_HZ();
#ifdef __FreeBSD__
time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
@ -4210,5 +4212,8 @@ int main(int argc, char **argv) {
show_guest_time_old = show_guest_time;
debug_log("done Loop No %zu", global_iterations_counter);
// restart check (14400 seconds)
if(now_monotonic_sec() - started_t > 14400) exit(0);
}
}

View file

@ -37,16 +37,6 @@ declare -A libreswan_established_add_time=()
# we need this to avoid converting tunnel names to chart IDs on every iteration
declare -A libreswan_tunnel_charts=()
is_able_sudo_ipsec() {
if ! sudo -n -l "${IPSEC_CMD}" whack --status > /dev/null 2>&1; then
return 1
fi
if ! sudo -n -l "${IPSEC_CMD}" whack --trafficstatus > /dev/null 2>&1; then
return 1
fi
return 0
}
# run the ipsec command
libreswan_ipsec() {
if [ ${libreswan_sudo} -ne 0 ]; then
@ -102,11 +92,6 @@ libreswan_check() {
return 1
fi
if [ ${libreswan_sudo} -ne 0 ] && ! is_able_sudo_ipsec; then
error "not enough permissions to execute ipsec with sudo. Disabling Libreswan plugin."
return 1
fi
# check that we can collect data
libreswan_get || return 1

View file

@ -7,7 +7,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/macos
# macos.plugin
Collects resource usage and performance data on macOS systems
Collects resource usage and performance data on MacOS systems
By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.

View file

@ -347,7 +347,7 @@ CHARTS = {
]
},
'threads_creation_rate': {
'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads', 'line'],
'lines': [
['Threads_created', 'created', 'incremental'],
]

View file

@ -433,6 +433,20 @@ fi
AC_MSG_RESULT([${enable_https}])
AM_CONDITIONAL([ENABLE_HTTPS], [test "${enable_https}" = "yes"])
# -----------------------------------------------------------------------------
# Exporting engine
AC_MSG_CHECKING([if netdata exporting engine should be used])
if test "${UV_LIBS}"; then
enable_exporting_engine="yes"
AC_DEFINE([ENABLE_EXPORTING], [1], [netdata exporting engine usability])
OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
OPTIONAL_UV_LIBS="${UV_LIBS}"
else
enable_exporting_engine="no"
fi
AC_MSG_RESULT([${enable_exporting_engine}])
AM_CONDITIONAL([ENABLE_EXPORTING], [test "${enable_exporting_engine}" = "yes"])
# -----------------------------------------------------------------------------
# JSON-C
test "${enable_jsonc}" = "yes" -a -z "${JSONC_LIBS}" && \

View file

@ -50,8 +50,6 @@
// backends for archiving the metrics
#include "backends/backends.h"
// the new exporting engine for archiving the metrics
#include "exporting/exporting_engine.h"
// the netdata API
#include "web/api/web_api_v1.h"

View file

@ -1,7 +1,6 @@
<!--
---
title: "Daemon configuration"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/daemon/config/README.md
---
-->
@ -221,7 +220,7 @@ For example, the `system.io` chart has the following default settings:
These `dim` settings produce two dimensions, `in` and `out`, both of which use the `incremental` algorithm. By
multiplying the value of `out` by -1, Netdata creates the negative values seen in the following area chart:
![The system.io chart on a macOS
![The system.io chart on a MacOS
laptop](https://user-images.githubusercontent.com/1153921/69286708-2cfb3900-0bb1-11ea-9fcd-dd8fbb2adf11.png)
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)

View file

@ -80,7 +80,9 @@ struct netdata_static_thread static_threads[] = {
// common plugins for all systems
{"BACKENDS", NULL, NULL, 1, NULL, NULL, backends_main},
#ifdef ENABLE_EXPORTING
{"EXPORTING", NULL, NULL, 1, NULL, NULL, exporting_main},
#endif
{"WEB_SERVER[static1]", NULL, NULL, 0, NULL, NULL, socket_listen_main_static_threaded},
{"STREAM", NULL, NULL, 0, NULL, NULL, rrdpush_sender_thread},

View file

@ -1,2 +1,2 @@
mkdocs>=1.0.1
mkdocs-material==4.6.3
mkdocs-material

View file

@ -81,7 +81,7 @@ You can bind Netdata to multiple IPs and ports. If you use hostnames, Netdata wi
For cloud based installations, if your cloud provider does not provide such a private LAN (or if you use multiple providers), you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. These tools create a mesh VPN allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to management and administration tasks on all your cloud servers.
For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your macOS system). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers).
For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your Mac). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers).
---

View file

@ -1,7 +1,6 @@
<!--
---
title: "The step-by-step Netdata tutorial"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/step-by-step/step-00.md
---
-->
@ -22,7 +21,7 @@ If you have monitoring experience, or would rather get straight into configuring
straight into code and configurations with our [getting started guide](../getting-started.md).
> This tutorial contains instructions for Netdata installed on a Linux system. Many of the instructions will work on
> other supported operating systems, like FreeBSD and macOS, but we can't make any guarantees.
> other supported operating systems, like FreeBSD and MacOS, but we can't make any guarantees.
## Where to go if you need help

View file

@ -1,7 +1,6 @@
<!--
---
title: "Step 4. The basics of configuring Netdata"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/step-by-step/step-04.md
---
-->
@ -62,7 +61,7 @@ an example file to your Netdata config directory and then allow you to edit it b
> change permanent](https://stackoverflow.com/questions/13046624/how-to-permanently-export-a-variable-in-linux).
Let's give it a shot. Navigate to your Netdata config directory. To use `edit-config` on `netdata.conf`, you need to
have permissions to edit the file. On Linux/macOS systems, you can usually use `sudo` to elevate your permissions.
have permissions to edit the file. On Linux/MacOS systems, you can usually use `sudo` to elevate your permissions.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different as found in the steps above

View file

@ -1,7 +1,6 @@
<!--
---
title: "Monitor Unbound DNS servers with Netdata"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/tutorials/collect-unbound-metrics.md
---
-->
@ -12,7 +11,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/tutorials/c
Labs. In v1.19 of Netdata, we release a completely refactored collector for collecting real-time metrics from Unbound
servers and displaying them in Netdata dashboards.
Unbound runs on FreeBSD, OpenBSD, NetBSD, macOS, Linux, and Windows, and supports DNS-over-TLS, which ensures that DNS
Unbound runs on FreeBSD, OpenBSD, NetBSD, MacOS, Linux, and Windows, and supports DNS-over-TLS, which ensures that DNS
queries and answers are all encrypted with TLS. In theory, that should reduce the risk of eavesdropping or
man-in-the-middle attacks when communicating to DNS servers.

View file

@ -1,7 +1,6 @@
<!--
---
title: "What is Netdata?"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/what-is-netdata.md
---
-->
@ -20,7 +19,7 @@ Netdata provides **unparalleled insights**, **in real-time**, of everything happ
_Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._
Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **macOS**.
Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **MacOS**.
---

View file

@ -75,23 +75,9 @@ void aws_kinesis_connector_worker(void *instance_p)
uv_mutex_lock(&instance->mutex);
uv_cond_wait(&instance->cond_var, &instance->mutex);
// reset the monitoring chart counters
stats->received_bytes =
stats->sent_bytes =
stats->sent_metrics =
stats->lost_metrics =
stats->receptions =
stats->transmission_successes =
stats->transmission_failures =
stats->data_lost_events =
stats->lost_bytes =
stats->reconnects = 0;
BUFFER *buffer = (BUFFER *)instance->buffer;
size_t buffer_len = buffer_strlen(buffer);
stats->buffered_bytes = buffer_len;
size_t sent = 0;
while (sent < buffer_len) {
@ -129,7 +115,7 @@ void aws_kinesis_connector_worker(void *instance_p)
connector_specific_data, connector_specific_config->stream_name, partition_key, first_char, record_len);
sent += record_len;
stats->transmission_successes++;
stats->chart_transmission_successes++;
size_t sent_bytes = 0, lost_bytes = 0;
@ -141,34 +127,30 @@ void aws_kinesis_connector_worker(void *instance_p)
"EXPORTING: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
instance->config.destination, sent_bytes, sent_bytes - lost_bytes);
stats->transmission_failures++;
stats->data_lost_events++;
stats->lost_bytes += lost_bytes;
stats->chart_transmission_failures++;
stats->chart_data_lost_events++;
stats->chart_lost_bytes += lost_bytes;
// estimate the number of lost metrics
stats->lost_metrics += (collected_number)(
stats->buffered_metrics *
stats->chart_lost_metrics += (collected_number)(
stats->chart_buffered_metrics *
(buffer_len && (lost_bytes > buffer_len) ? (double)lost_bytes / buffer_len : 1));
break;
} else {
stats->receptions++;
stats->chart_receptions++;
}
if (unlikely(netdata_exit))
break;
}
stats->sent_bytes += sent;
stats->chart_sent_bytes += sent;
if (likely(sent == buffer_len))
stats->sent_metrics = stats->buffered_metrics;
stats->chart_sent_metrics = stats->chart_buffered_metrics;
buffer_flush(buffer);
send_internal_metrics(instance);
stats->buffered_metrics = 0;
uv_mutex_unlock(&instance->mutex);
#ifdef UNIT_TESTING

View file

@ -35,11 +35,6 @@ void *exporting_main(void *ptr)
goto cleanup;
}
RRDSET *st_main_rusage = NULL;
RRDDIM *rd_main_user = NULL;
RRDDIM *rd_main_system = NULL;
create_main_rusage_chart(&st_main_rusage, &rd_main_user, &rd_main_system);
usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
heartbeat_init(&hb);
@ -60,7 +55,10 @@ void *exporting_main(void *ptr)
break;
}
send_main_rusage(st_main_rusage, rd_main_user, rd_main_system);
if (send_internal_metrics(engine) != 0) {
error("EXPORTING: cannot send metrics for the operation of exporting engine");
break;
}
#ifdef UNIT_TESTING
break;

View file

@ -14,10 +14,10 @@
extern struct config exporting_config;
#define EXPORTING_UPDATE_EVERY_OPTION_NAME "update every"
#define EXPORTING_UPDATE_EVERY_DEFAULT 10
#define EXPORTING_UPDATE_EVERY_DEFAULT 10
typedef enum exporting_options {
EXPORTING_OPTION_NON = 0,
EXPORTING_OPTION_NONE = 0,
EXPORTING_SOURCE_DATA_AS_COLLECTED = (1 << 0),
EXPORTING_SOURCE_DATA_AVERAGE = (1 << 1),
@ -42,22 +42,10 @@ typedef enum exporting_options {
(instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS && \
label->label_source != LABEL_SOURCE_NETDATA_CONF))
typedef enum exporting_connector_types {
EXPORTING_CONNECTOR_TYPE_UNKNOWN, // Invalid type
EXPORTING_CONNECTOR_TYPE_GRAPHITE, // Send plain text to Graphite
EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET, // Send data to OpenTSDB using telnet API
EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP, // Send data to OpenTSDB using HTTP API
EXPORTING_CONNECTOR_TYPE_JSON, // Stores the data using JSON.
EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // The user selected to use Prometheus backend
EXPORTING_CONNECTOR_TYPE_KINESIS, // Send message to AWS Kinesis
EXPORTING_CONNECTOR_TYPE_MONGODB, // Send data to MongoDB collection
EXPORTING_CONNECTOR_TYPE_NUM // Number of backend types
} EXPORTING_CONNECTOR_TYPE;
struct engine;
struct instance_config {
EXPORTING_CONNECTOR_TYPE type;
BACKEND_TYPE type;
const char *name;
const char *destination;
@ -99,42 +87,18 @@ struct engine_config {
};
struct stats {
collected_number buffered_metrics;
collected_number lost_metrics;
collected_number sent_metrics;
collected_number buffered_bytes;
collected_number lost_bytes;
collected_number sent_bytes;
collected_number received_bytes;
collected_number transmission_successes;
collected_number data_lost_events;
collected_number reconnects;
collected_number transmission_failures;
collected_number receptions;
int initialized;
RRDSET *st_metrics;
RRDDIM *rd_buffered_metrics;
RRDDIM *rd_lost_metrics;
RRDDIM *rd_sent_metrics;
RRDSET *st_bytes;
RRDDIM *rd_buffered_bytes;
RRDDIM *rd_lost_bytes;
RRDDIM *rd_sent_bytes;
RRDDIM *rd_received_bytes;
RRDSET *st_ops;
RRDDIM *rd_transmission_successes;
RRDDIM *rd_data_lost_events;
RRDDIM *rd_reconnects;
RRDDIM *rd_transmission_failures;
RRDDIM *rd_receptions;
RRDSET *st_rusage;
RRDDIM *rd_user;
RRDDIM *rd_system;
collected_number chart_buffered_metrics;
collected_number chart_lost_metrics;
collected_number chart_sent_metrics;
collected_number chart_buffered_bytes;
collected_number chart_received_bytes;
collected_number chart_sent_bytes;
collected_number chart_receptions;
collected_number chart_transmission_successes;
collected_number chart_transmission_failures;
collected_number chart_data_lost_events;
collected_number chart_lost_bytes;
collected_number chart_reconnects;
};
struct instance {
@ -186,12 +150,10 @@ struct engine {
struct instance *instance_root;
};
extern struct instance *prometheus_exporter_instance;
void *exporting_main(void *ptr);
struct engine *read_exporting_config();
EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type);
BACKEND_TYPE exporting_select_type(const char *type);
int init_connectors(struct engine *engine);
@ -217,17 +179,12 @@ int end_chart_formatting(struct engine *engine, RRDSET *st);
int end_host_formatting(struct engine *engine, RRDHOST *host);
int end_batch_formatting(struct engine *engine);
int flush_host_labels(struct instance *instance, RRDHOST *host);
int simple_connector_update_buffered_bytes(struct instance *instance);
int exporting_discard_response(BUFFER *buffer, struct instance *instance);
void simple_connector_receive_response(int *sock, struct instance *instance);
void simple_connector_send_buffer(int *sock, int *failures, struct instance *instance);
void simple_connector_worker(void *instance_p);
void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
void send_internal_metrics(struct instance *instance);
#include "exporting/prometheus/prometheus.h"
int send_internal_metrics(struct engine *engine);
#endif /* NETDATA_EXPORTING_ENGINE_H */

View file

@ -27,7 +27,7 @@ int init_graphite_instance(struct instance *instance)
instance->end_chart_formatting = NULL;
instance->end_host_formatting = flush_host_labels;
instance->end_batch_formatting = simple_connector_update_buffered_bytes;
instance->end_batch_formatting = NULL;
instance->send_header = NULL;
instance->check_response = exporting_discard_response;

View file

@ -32,35 +32,35 @@ int init_connectors(struct engine *engine)
instance->after = engine->now;
switch (instance->config.type) {
case EXPORTING_CONNECTOR_TYPE_GRAPHITE:
case BACKEND_TYPE_GRAPHITE:
if (init_graphite_instance(instance) != 0)
return 1;
break;
case EXPORTING_CONNECTOR_TYPE_JSON:
case BACKEND_TYPE_JSON:
if (init_json_instance(instance) != 0)
return 1;
break;
case EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET:
case BACKEND_TYPE_OPENTSDB_USING_TELNET:
if (init_opentsdb_telnet_instance(instance) != 0)
return 1;
break;
case EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP:
case BACKEND_TYPE_OPENTSDB_USING_HTTP:
if (init_opentsdb_http_instance(instance) != 0)
return 1;
break;
case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
case BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE:
#if ENABLE_PROMETHEUS_REMOTE_WRITE
if (init_prometheus_remote_write_instance(instance) != 0)
return 1;
#endif
break;
case EXPORTING_CONNECTOR_TYPE_KINESIS:
case BACKEND_TYPE_KINESIS:
#if HAVE_KINESIS
if (init_aws_kinesis_instance(instance) != 0)
return 1;
#endif
break;
case EXPORTING_CONNECTOR_TYPE_MONGODB:
case BACKEND_TYPE_MONGODB:
#if HAVE_MONGOC
if (init_mongodb_instance(instance) != 0)
return 1;
@ -77,7 +77,7 @@ int init_connectors(struct engine *engine)
error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
return 1;
}
char threadname[NETDATA_THREAD_NAME_MAX + 1];
char threadname[NETDATA_THREAD_NAME_MAX+1];
snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
uv_thread_set_name_np(instance->thread, threadname);
}

View file

@ -27,7 +27,7 @@ int init_json_instance(struct instance *instance)
instance->end_chart_formatting = NULL;
instance->end_host_formatting = flush_host_labels;
instance->end_batch_formatting = simple_connector_update_buffered_bytes;
instance->end_batch_formatting = NULL;
instance->send_header = NULL;
instance->check_response = exporting_discard_response;

View file

@ -183,10 +183,8 @@ int format_batch_mongodb(struct instance *instance)
// ring buffer is full, reuse the oldest element
connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
free_bson(insert, connector_specific_data->last_buffer->documents_inserted);
connector_specific_data->total_documents_inserted -= connector_specific_data->last_buffer->documents_inserted;
stats->buffered_bytes -= connector_specific_data->last_buffer->buffered_bytes;
}
insert = callocz((size_t)stats->buffered_metrics, sizeof(bson_t *));
insert = callocz((size_t)stats->chart_buffered_metrics, sizeof(bson_t *));
connector_specific_data->last_buffer->insert = insert;
BUFFER *buffer = (BUFFER *)instance->buffer;
@ -195,7 +193,7 @@ int format_batch_mongodb(struct instance *instance)
size_t documents_inserted = 0;
while (*end && documents_inserted <= (size_t)stats->buffered_metrics) {
while (*end && documents_inserted <= (size_t)stats->chart_buffered_metrics) {
while (*end && *end != '\n')
end++;
@ -210,8 +208,7 @@ int format_batch_mongodb(struct instance *instance)
insert[documents_inserted] = bson_new_from_json((const uint8_t *)start, -1, &bson_error);
if (unlikely(!insert[documents_inserted])) {
error(
"EXPORTING: Failed creating a BSON document from a JSON string \"%s\" : %s", start, bson_error.message);
error("EXPORTING: %s", bson_error.message);
free_bson(insert, documents_inserted);
return 1;
}
@ -221,16 +218,8 @@ int format_batch_mongodb(struct instance *instance)
documents_inserted++;
}
stats->buffered_bytes += connector_specific_data->last_buffer->buffered_bytes = buffer_strlen(buffer);
buffer_flush(buffer);
// The stats->buffered_metrics is used in the MongoDB batch formatting as a variable for the number
// of metrics, added in the current iteration, so we are clearing it here. We will use the
// connector_specific_data->total_documents_inserted in the worker to show the statistics.
stats->buffered_metrics = 0;
connector_specific_data->total_documents_inserted += documents_inserted;
connector_specific_data->last_buffer->documents_inserted = documents_inserted;
connector_specific_data->last_buffer = connector_specific_data->last_buffer->next;
@ -257,25 +246,11 @@ void mongodb_connector_worker(void *instance_p)
uv_mutex_lock(&instance->mutex);
uv_cond_wait(&instance->cond_var, &instance->mutex);
// reset the monitoring chart counters
stats->received_bytes =
stats->sent_bytes =
stats->sent_metrics =
stats->lost_metrics =
stats->receptions =
stats->transmission_successes =
stats->transmission_failures =
stats->data_lost_events =
stats->lost_bytes =
stats->reconnects = 0;
bson_t **insert = connector_specific_data->first_buffer->insert;
size_t documents_inserted = connector_specific_data->first_buffer->documents_inserted;
size_t buffered_bytes = connector_specific_data->first_buffer->buffered_bytes;
connector_specific_data->first_buffer->insert = NULL;
connector_specific_data->first_buffer->documents_inserted = 0;
connector_specific_data->first_buffer->buffered_bytes = 0;
connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
uv_mutex_unlock(&instance->mutex);
@ -304,10 +279,9 @@ void mongodb_connector_worker(void *instance_p)
NULL,
NULL,
&bson_error))) {
stats->sent_metrics = documents_inserted;
stats->sent_bytes += data_size;
stats->transmission_successes++;
stats->receptions++;
stats->chart_sent_bytes += data_size;
stats->chart_transmission_successes++;
stats->chart_receptions++;
} else {
// oops! we couldn't send (all or some of the) data
error("EXPORTING: %s", bson_error.message);
@ -316,10 +290,10 @@ void mongodb_connector_worker(void *instance_p)
"Willing to write %zu bytes, wrote %zu bytes.",
instance->config.destination, data_size, 0UL);
stats->transmission_failures++;
stats->data_lost_events++;
stats->lost_bytes += buffered_bytes;
stats->lost_metrics += documents_inserted;
stats->chart_transmission_failures++;
stats->chart_data_lost_events++;
stats->chart_lost_bytes += data_size;
stats->chart_lost_metrics += stats->chart_buffered_metrics;
}
free_bson(insert, documents_inserted);
@ -327,18 +301,8 @@ void mongodb_connector_worker(void *instance_p)
if (unlikely(netdata_exit))
break;
uv_mutex_lock(&instance->mutex);
stats->buffered_metrics = connector_specific_data->total_documents_inserted;
send_internal_metrics(instance);
connector_specific_data->total_documents_inserted -= documents_inserted;
stats->buffered_metrics = 0;
stats->buffered_bytes -= buffered_bytes;
uv_mutex_unlock(&instance->mutex);
stats->chart_sent_bytes += data_size;
stats->chart_sent_metrics = stats->chart_buffered_metrics;
#ifdef UNIT_TESTING
break;

View file

@ -10,7 +10,6 @@
struct bson_buffer {
bson_t **insert;
size_t documents_inserted;
size_t buffered_bytes;
struct bson_buffer *next;
};
@ -19,8 +18,6 @@ struct mongodb_specific_data {
mongoc_client_t *client;
mongoc_collection_t *collection;
size_t total_documents_inserted;
bson_t **current_insert;
struct bson_buffer *first_buffer;
struct bson_buffer *last_buffer;

View file

@ -27,7 +27,7 @@ int init_opentsdb_telnet_instance(struct instance *instance)
instance->end_chart_formatting = NULL;
instance->end_host_formatting = flush_host_labels;
instance->end_batch_formatting = simple_connector_update_buffered_bytes;
instance->end_batch_formatting = NULL;
instance->send_header = NULL;
instance->check_response = exporting_discard_response;
@ -68,7 +68,7 @@ int init_opentsdb_http_instance(struct instance *instance)
instance->end_chart_formatting = NULL;
instance->end_host_formatting = flush_host_labels;
instance->end_batch_formatting = simple_connector_update_buffered_bytes;
instance->end_batch_formatting = NULL;
instance->send_header = NULL;
instance->check_response = exporting_discard_response;

View file

@ -206,7 +206,7 @@ int start_host_formatting(struct engine *engine, RRDHOST *host)
* Start chart formatting for every connector instance's buffer
*
* @param engine an engine data structure.
* @param st a chart.
* @param a chart.
* @return Returns 0 on success, 1 on failure.
*/
int start_chart_formatting(struct engine *engine, RRDSET *st)
@ -242,7 +242,7 @@ int metric_formatting(struct engine *engine, RRDDIM *rd)
error("EXPORTING: cannot format metric for %s", instance->config.name);
return 1;
}
instance->stats.buffered_metrics++;
instance->stats.chart_buffered_metrics++;
}
}
@ -389,19 +389,6 @@ int flush_host_labels(struct instance *instance, RRDHOST *host)
return 0;
}
/**
* Update stats for buffered bytes
*
* @param instance an instance data structure.
* @return Always returns 0.
*/
int simple_connector_update_buffered_bytes(struct instance *instance)
{
instance->stats.buffered_bytes = (collected_number)buffer_strlen((BUFFER *)(instance->buffer));
return 0;
}
/**
* Notify workers
*

View file

@ -7,16 +7,10 @@
// PROMETHEUS
// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
/**
* Check if a chart can be sent to an external database
*
* @param instance an instance data structure.
* @param st a chart.
* @return Returns 1 if the chart can be sent, 0 otherwise.
*/
inline int can_send_rrdset(struct instance *instance, RRDSET *st)
{
RRDHOST *host = st->rrdhost;
(void)host;
if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
return 0;
@ -30,7 +24,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
debug(
D_BACKEND,
"EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.",
"BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.",
st->id,
host->hostname);
return 0;
@ -40,7 +34,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
if (unlikely(!rrdset_is_available_for_backends(st))) {
debug(
D_BACKEND,
"EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.",
"BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.",
st->id,
host->hostname);
return 0;
@ -48,10 +42,10 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
if (unlikely(
st->rrd_memory_mode == RRD_MEMORY_MODE_NONE &&
!(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
!(BACKEND_OPTIONS_DATA_SOURCE(instance->config.options) == BACKEND_SOURCE_DATA_AS_COLLECTED))) {
debug(
D_BACKEND,
"EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting connector requires database access.",
"BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.",
st->id,
host->hostname,
rrd_memory_mode_name(host->rrd_memory_mode));
@ -69,19 +63,8 @@ static struct prometheus_server {
struct prometheus_server *next;
} *prometheus_server_root = NULL;
/**
* Get the last time when a Prometheus server scraped the Netdata Prometheus exporter.
*
* @param server the name of the Prometheus server.
* @param host a data collecting host.
* @param now actual time.
* @return Returns the last time when the server accessed Netdata, or 0 if it is the first occurrence.
*/
static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now)
{
#ifdef UNIT_TESTING
return 0;
#endif
static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
uint32_t hash = simple_hash(server);
@ -110,14 +93,6 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
return 0;
}
/**
* Copy and sanitize name.
*
* @param d a destination string.
* @param s a source string.
* @param usable the number of characters to copy.
* @return Returns the length of the copied string.
*/
inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
{
size_t n;
@ -135,14 +110,6 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
return n;
}
/**
* Copy and sanitize label.
*
* @param d a destination string.
* @param s a source string.
* @param usable the number of characters to copy.
* @return Returns the length of the copied string.
*/
inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
{
size_t n;
@ -164,15 +131,6 @@ inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
return n;
}
/**
* Copy and sanitize units.
*
* @param d a destination string.
* @param s a source string.
* @param usable the number of characters to copy.
* @param showoldunits set this flag to 1 to show old (before v1.12) units.
* @return Returns the destination string.
*/
inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits)
{
const char *sorig = s;
@ -245,43 +203,6 @@ inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int sh
return ret;
}
/**
* Format host labels for the Prometheus exporter
*
* @param instance an instance data structure.
* @param host a data collecting host.
*/
void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
{
if (unlikely(!sending_labels_configured(instance)))
return;
if (!instance->labels)
instance->labels = buffer_create(1024);
int count = 0;
rrdhost_check_rdlock(host);
netdata_rwlock_rdlock(&host->labels_rwlock);
for (struct label *label = host->labels; label; label = label->next) {
if (!should_send_label(instance, label))
continue;
char key[PROMETHEUS_ELEMENT_MAX + 1];
char value[PROMETHEUS_ELEMENT_MAX + 1];
prometheus_name_copy(key, label->key, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(value, label->value, PROMETHEUS_ELEMENT_MAX);
if (*key && *value) {
if (count > 0)
buffer_strcat(instance->labels, ",");
buffer_sprintf(instance->labels, "%s=\"%s\"", key, value);
count++;
}
}
netdata_rwlock_unlock(&host->labels_rwlock);
}
struct host_variables_callback_options {
RRDHOST *host;
BUFFER *wb;
@ -294,13 +215,6 @@ struct host_variables_callback_options {
char name[PROMETHEUS_VARIABLE_MAX + 1];
};
/**
* Print host variables.
*
* @param rv a variable.
* @param data callback options.
* @return Returns 1 if the chart can be sent, 0 otherwise.
*/
static int print_host_variables(RRDVAR *rv, void *data)
{
struct host_variables_callback_options *opts = data;
@ -360,23 +274,14 @@ static int print_host_variables(RRDVAR *rv, void *data)
return 0;
}
/**
* Write metrics in Prometheus format to a buffer.
*
* @param instance an instance data structure.
* @param host a data collecting host.
* @param wb the buffer to fill with metrics.
* @param prefix a prefix for every metric.
* @param exporting_options options to configure what data is exported.
* @param allhosts set to 1 if host instance should be in the output for tags.
* @param output_options options to configure the format of the output.
*/
static void rrd_stats_api_v1_charts_allmetrics_prometheus(
struct instance *instance,
RRDHOST *host,
BUFFER *wb,
const char *prefix,
EXPORTING_OPTIONS exporting_options,
time_t after,
time_t before,
int allhosts,
PROMETHEUS_OUTPUT_OPTIONS output_options)
{
@ -385,33 +290,31 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
format_host_labels_prometheus(instance, host);
if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
buffer_sprintf(
wb,
"netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
hostname,
host->program_name,
host->program_version,
now_realtime_usec() / USEC_PER_MS);
else
buffer_sprintf(
wb,
"netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
hostname,
host->program_name,
host->program_version);
char labels[PROMETHEUS_LABELS_MAX + 1] = "";
if (allhosts) {
if (instance->labels && buffer_tostring(instance->labels)) {
if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
buffer_sprintf(
wb,
"netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
hostname,
host->program_name,
host->program_version,
now_realtime_usec() / USEC_PER_MS);
else
buffer_sprintf(
wb,
"netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
hostname,
host->program_name,
host->program_version);
if (host->tags && *(host->tags)) {
if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
buffer_sprintf(
wb,
"netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n",
hostname,
buffer_tostring(instance->labels),
host->tags,
now_realtime_usec() / USEC_PER_MS);
// deprecated, exists only for compatibility with older queries
@ -419,46 +322,50 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
wb,
"netdata_host_tags{instance=\"%s\",%s} 1 %llu\n",
hostname,
buffer_tostring(instance->labels),
host->tags,
now_realtime_usec() / USEC_PER_MS);
} else {
buffer_sprintf(
wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, host->tags);
// deprecated, exists only for compatibility with older queries
buffer_sprintf(
wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, host->tags);
}
}
snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
} else {
if (instance->labels && buffer_tostring(instance->labels)) {
if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
buffer_sprintf(
wb,
"netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
hostname,
host->program_name,
host->program_version,
now_realtime_usec() / USEC_PER_MS);
else
buffer_sprintf(
wb,
"netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
hostname,
host->program_name,
host->program_version);
if (host->tags && *(host->tags)) {
if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
buffer_sprintf(
wb,
"netdata_host_tags_info{%s} 1 %llu\n",
buffer_tostring(instance->labels),
now_realtime_usec() / USEC_PER_MS);
wb, "netdata_host_tags_info{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
// deprecated, exists only for compatibility with older queries
buffer_sprintf(
wb,
"netdata_host_tags{%s} 1 %llu\n",
buffer_tostring(instance->labels),
now_realtime_usec() / USEC_PER_MS);
buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
} else {
buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", buffer_tostring(instance->labels));
buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", host->tags);
// deprecated, exists only for compatibility with older queries
buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", buffer_tostring(instance->labels));
buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", host->tags);
}
}
}
if (instance->labels)
buffer_flush(instance->labels);
// send custom variables set for the host
if (output_options & PROMETHEUS_OUTPUT_VARIABLES) {
struct host_variables_callback_options opts = { .host = host,
@ -476,20 +383,20 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
RRDSET *st;
rrdset_foreach_read(st, host)
{
char chart[PROMETHEUS_ELEMENT_MAX + 1];
char context[PROMETHEUS_ELEMENT_MAX + 1];
char family[PROMETHEUS_ELEMENT_MAX + 1];
prometheus_label_copy(
chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
if (likely(can_send_rrdset(instance, st))) {
rrdset_rdlock(st);
char chart[PROMETHEUS_ELEMENT_MAX + 1];
char context[PROMETHEUS_ELEMENT_MAX + 1];
char family[PROMETHEUS_ELEMENT_MAX + 1];
char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
prometheus_label_copy(
chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
int homogeneous = 1;
if (as_collected) {
@ -526,7 +433,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
if (as_collected) {
// we need as-collected / raw data
if (unlikely(rd->last_collected_time.tv_sec < instance->after))
if (unlikely(rd->last_collected_time.tv_sec < after))
continue;
const char *t = "gauge", *h = "gives";
@ -655,9 +562,8 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
} else {
// we need average or sum of the data
time_t first_time = instance->after;
time_t last_time = instance->before;
calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_time);
time_t first_t = after, last_t = before;
calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
if (!isnan(value) && !isinf(value)) {
if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE)
@ -680,8 +586,8 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
suffix,
(output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
st->units,
(unsigned long long)first_time,
(unsigned long long)last_time);
(unsigned long long)first_t,
(unsigned long long)last_t);
if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s%s gauge\n", prefix, context, units, suffix);
@ -700,7 +606,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
dimension,
labels,
value,
last_time * MSEC_PER_SEC);
last_t * MSEC_PER_SEC);
else
buffer_sprintf(
wb,
@ -727,18 +633,6 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
rrdhost_unlock(host);
}
/**
* Get the last time time when a server accessed Netdata. Write information about an API request to a buffer.
*
* @param instance an instance data structure.
* @param host a data collecting host.
* @param wb the buffer to write to.
* @param exporting_options options to configure what data is exported.
* @param server the name of a Prometheus server..
* @param now actual time.
* @param output_options options to configure the format of the output.
* @return Returns the last time when the server accessed Netdata.
*/
static inline time_t prometheus_preparation(
struct instance *instance,
RRDHOST *host,
@ -755,13 +649,13 @@ static inline time_t prometheus_preparation(
int first_seen = 0;
if (!after) {
after = now - instance->config.update_every;
after = now - instance->engine->config.update_every;
first_seen = 1;
}
if (after > now) {
// oops! this should never happen
after = now - instance->config.update_every;
after = now - instance->engine->config.update_every;
}
if (output_options & PROMETHEUS_OUTPUT_HELP) {
@ -791,17 +685,8 @@ static inline time_t prometheus_preparation(
return after;
}
/**
* Write metrics and auxiliary information for one host to a buffer.
*
* @param host a data collecting host.
* @param wb the buffer to write to.
* @param server the name of a Prometheus server.
* @param prefix a prefix for every metric.
* @param exporting_options options to configure what data is exported.
* @param output_options options to configure the format of the output.
*/
void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
struct instance *instance,
RRDHOST *host,
BUFFER *wb,
const char *server,
@ -809,36 +694,17 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
EXPORTING_OPTIONS exporting_options,
PROMETHEUS_OUTPUT_OPTIONS output_options)
{
if (unlikely(!prometheus_exporter_instance))
return;
prometheus_exporter_instance->before = now_realtime_sec();
time_t before = now_realtime_sec();
// we start at the point we had stopped before
prometheus_exporter_instance->after = prometheus_preparation(
prometheus_exporter_instance,
host,
wb,
exporting_options,
server,
prometheus_exporter_instance->before,
output_options);
time_t after = prometheus_preparation(instance, host, wb, exporting_options, server, before, output_options);
rrd_stats_api_v1_charts_allmetrics_prometheus(
prometheus_exporter_instance, host, wb, prefix, exporting_options, 0, output_options);
instance, host, wb, prefix, exporting_options, after, before, 0, output_options);
}
/**
* Write metrics and auxiliary information for all hosts to a buffer.
*
* @param host a data collecting host.
* @param wb the buffer to write to.
* @param server the name of a Prometheus server.
* @param prefix a prefix for every metric.
* @param exporting_options options to configure what data is exported.
* @param output_options options to configure the format of the output.
*/
void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
struct instance *instance,
RRDHOST *host,
BUFFER *wb,
const char *server,
@ -846,26 +712,16 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
EXPORTING_OPTIONS exporting_options,
PROMETHEUS_OUTPUT_OPTIONS output_options)
{
if (unlikely(!prometheus_exporter_instance))
return;
prometheus_exporter_instance->before = now_realtime_sec();
time_t before = now_realtime_sec();
// we start at the point we had stopped before
prometheus_exporter_instance->after = prometheus_preparation(
prometheus_exporter_instance,
host,
wb,
exporting_options,
server,
prometheus_exporter_instance->before,
output_options);
time_t after = prometheus_preparation(instance, host, wb, exporting_options, server, before, output_options);
rrd_rdlock();
rrdhost_foreach_read(host)
{
rrd_stats_api_v1_charts_allmetrics_prometheus(
prometheus_exporter_instance, host, wb, prefix, exporting_options, 1, output_options);
instance, host, wb, prefix, exporting_options, after, before, 1, output_options);
}
rrd_unlock();
}

View file

@ -23,10 +23,10 @@ typedef enum prometheus_output_flags {
} PROMETHEUS_OUTPUT_OPTIONS;
extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
struct instance *instance, RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
struct instance *instance, RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
int can_send_rrdset(struct instance *instance, RRDSET *st);
@ -34,6 +34,4 @@ size_t prometheus_name_copy(char *d, const char *s, size_t usable);
size_t prometheus_label_copy(char *d, const char *s, size_t usable);
char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits);
void format_host_labels_prometheus(struct instance *instance, RRDHOST *host);
#endif //NETDATA_EXPORTING_PROMETHEUS_H

View file

@ -314,7 +314,7 @@ int format_batch_prometheus_remote_write(struct instance *instance)
return 1;
}
buffer->len = data_size;
instance->stats.buffered_bytes = (collected_number)buffer_strlen(buffer);
instance->stats.chart_buffered_bytes = (collected_number)buffer_strlen(buffer);
return 0;
}

View file

@ -2,19 +2,18 @@
#include "exporting_engine.h"
struct config exporting_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config exporting_config = {.first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = {.avl_tree = {.root = NULL, .compar = appconfig_section_compare},
.rwlock = AVL_LOCK_INITIALIZER}};
struct instance *prometheus_exporter_instance = NULL;
static _CONNECTOR_INSTANCE *find_instance(const char *section)
{
_CONNECTOR_INSTANCE *local_ci;
local_ci = add_connector_instance(NULL, NULL); // Get root section
local_ci = add_connector_instance(NULL, NULL); // Get root section
if (unlikely(!local_ci))
return local_ci;
@ -39,10 +38,12 @@ char *expconfig_get(struct config *root, const char *section, const char *name,
local_ci = find_instance(section);
if (!local_ci)
return NULL; // TODO: Check if it is meaningful to return default_value
return NULL; // TODO: Check if it is meaningful to return default_value
return appconfig_get(
root, local_ci->instance_name, name,
root,
local_ci->instance_name,
name,
appconfig_get(
root, local_ci->connector_name, name, appconfig_get(root, CONFIG_SECTION_EXPORTING, name, default_value)));
}
@ -57,12 +58,16 @@ int expconfig_get_boolean(struct config *root, const char *section, const char *
local_ci = find_instance(section);
if (!local_ci)
return 0; // TODO: Check if it is meaningful to return default_value
return 0; // TODO: Check if it is meaningful to return default_value
return appconfig_get_boolean(
root, local_ci->instance_name, name,
root,
local_ci->instance_name,
name,
appconfig_get_boolean(
root, local_ci->connector_name, name,
root,
local_ci->connector_name,
name,
appconfig_get_boolean(root, CONFIG_SECTION_EXPORTING, name, default_value)));
}
@ -76,12 +81,16 @@ long long expconfig_get_number(struct config *root, const char *section, const c
local_ci = find_instance(section);
if (!local_ci)
return 0; // TODO: Check if it is meaningful to return default_value
return 0; // TODO: Check if it is meaningful to return default_value
return appconfig_get_number(
root, local_ci->instance_name, name,
root,
local_ci->instance_name,
name,
appconfig_get_number(
root, local_ci->connector_name, name,
root,
local_ci->connector_name,
name,
appconfig_get_number(root, CONFIG_SECTION_EXPORTING, name, default_value)));
}
@ -99,7 +108,7 @@ int get_connector_instance(struct connector_instance *target_ci)
static _CONNECTOR_INSTANCE *local_ci = NULL;
_CONNECTOR_INSTANCE *global_connector_instance;
global_connector_instance = find_instance(NULL); // Fetch head of instances
global_connector_instance = find_instance(NULL); // Fetch head of instances
if (unlikely(!global_connector_instance))
return 0;
@ -131,39 +140,40 @@ int get_connector_instance(struct connector_instance *target_ci)
*
* @return It returns the connector id.
*/
EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type)
BACKEND_TYPE exporting_select_type(const char *type)
{
if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
return EXPORTING_CONNECTOR_TYPE_GRAPHITE;
return BACKEND_TYPE_GRAPHITE;
} else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
return EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET;
return BACKEND_TYPE_OPENTSDB_USING_TELNET;
} else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
return EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP;
return BACKEND_TYPE_OPENTSDB_USING_HTTP;
} else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
return EXPORTING_CONNECTOR_TYPE_JSON;
return BACKEND_TYPE_JSON;
} else if (!strcmp(type, "prometheus_remote_write")) {
return EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE;
return BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE;
} else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
return EXPORTING_CONNECTOR_TYPE_KINESIS;
return BACKEND_TYPE_KINESIS;
} else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext"))
return EXPORTING_CONNECTOR_TYPE_MONGODB;
return BACKEND_TYPE_MONGODB;
return EXPORTING_CONNECTOR_TYPE_UNKNOWN;
return BACKEND_TYPE_UNKNOWN;
}
EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options)
{
if (!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") ||
!strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options) {
if(!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") || !strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
exporting_options |= EXPORTING_SOURCE_DATA_AS_COLLECTED;
exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AS_COLLECTED);
} else if (!strcmp(data_source, "average")) {
}
else if(!strcmp(data_source, "average")) {
exporting_options |= EXPORTING_SOURCE_DATA_AVERAGE;
exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AVERAGE);
} else if (!strcmp(data_source, "sum") || !strcmp(data_source, "volume")) {
}
else if(!strcmp(data_source, "sum") || !strcmp(data_source, "volume")) {
exporting_options |= EXPORTING_SOURCE_DATA_SUM;
exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_SUM);
} else {
}
else {
error("EXPORTING: invalid data data_source method '%s'.", data_source);
}
@ -185,7 +195,7 @@ struct engine *read_exporting_config()
static struct engine *engine = NULL;
struct connector_instance_list {
struct connector_instance local_ci;
EXPORTING_CONNECTOR_TYPE backend_type;
BACKEND_TYPE backend_type;
struct connector_instance_list *next;
};
@ -210,46 +220,6 @@ struct engine *read_exporting_config()
freez(filename);
#define prometheus_config_get(name, value) \
appconfig_get( \
&exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
appconfig_get(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
#define prometheus_config_get_number(name, value) \
appconfig_get_number( \
&exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
appconfig_get_number(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
#define prometheus_config_get_boolean(name, value) \
appconfig_get_boolean( \
&exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
if (!prometheus_exporter_instance) {
prometheus_exporter_instance = callocz(1, sizeof(struct instance));
prometheus_exporter_instance->config.update_every =
prometheus_config_get_number(EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
if (prometheus_config_get_boolean("send names instead of ids", CONFIG_BOOLEAN_YES))
prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
else
prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
if (prometheus_config_get_boolean("send configured labels", CONFIG_BOOLEAN_YES))
prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
else
prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
if (prometheus_config_get_boolean("send automatic labels", CONFIG_BOOLEAN_NO))
prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
else
prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
prometheus_exporter_instance->config.charts_pattern =
simple_pattern_create(prometheus_config_get("send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create(
prometheus_config_get("send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
}
// TODO: change BACKEND to EXPORTING
while (get_connector_instance(&local_ci)) {
info("Processing connector instance (%s)", local_ci.instance_name);
@ -257,7 +227,8 @@ struct engine *read_exporting_config()
if (exporter_get_boolean(local_ci.instance_name, "enabled", 0)) {
info(
"Instance (%s) on connector (%s) is enabled and scheduled for activation",
local_ci.instance_name, local_ci.connector_name);
local_ci.instance_name,
local_ci.connector_name);
tmp_ci_list = (struct connector_instance_list *)callocz(1, sizeof(struct connector_instance_list));
memcpy(&tmp_ci_list->local_ci, &local_ci, sizeof(local_ci));
@ -281,8 +252,8 @@ struct engine *read_exporting_config()
engine->config.hostname =
strdupz(exporter_get(CONFIG_SECTION_EXPORTING, "hostname", netdata_configured_hostname));
engine->config.prefix = strdupz(exporter_get(CONFIG_SECTION_EXPORTING, "prefix", "netdata"));
engine->config.update_every = exporter_get_number(
CONFIG_SECTION_EXPORTING, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
engine->config.update_every =
exporter_get_number(CONFIG_SECTION_EXPORTING, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
}
while (tmp_ci_list) {
@ -291,27 +262,27 @@ struct engine *read_exporting_config()
info("Instance %s on %s", tmp_ci_list->local_ci.instance_name, tmp_ci_list->local_ci.connector_name);
if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) {
if (tmp_ci_list->backend_type == BACKEND_TYPE_UNKNOWN) {
error("Unknown exporting connector type");
goto next_connector_instance;
}
#ifndef ENABLE_PROMETHEUS_REMOTE_WRITE
if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
if (tmp_ci_list->backend_type == BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE) {
error("Prometheus Remote Write support isn't compiled");
goto next_connector_instance;
}
#endif
#ifndef HAVE_KINESIS
if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
if (tmp_ci_list->backend_type == BACKEND_TYPE_KINESIS) {
error("AWS Kinesis support isn't compiled");
goto next_connector_instance;
}
#endif
#ifndef HAVE_MONGOC
if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
if (tmp_ci_list->backend_type == BACKEND_TYPE_MONGODB) {
error("MongoDB support isn't compiled");
goto next_connector_instance;
}
@ -328,31 +299,41 @@ struct engine *read_exporting_config()
tmp_instance->config.name = strdupz(tmp_ci_list->local_ci.instance_name);
tmp_instance->config.destination = strdupz(exporter_get(instance_name, "destination", "localhost"));
tmp_instance->config.destination =
strdupz(exporter_get(instance_name, "destination", "localhost"));
tmp_instance->config.update_every =
exporter_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
tmp_instance->config.buffer_on_failures = exporter_get_number(instance_name, "buffer on failures", 10);
tmp_instance->config.buffer_on_failures =
exporter_get_number(instance_name, "buffer on failures", 10);
tmp_instance->config.timeoutms = exporter_get_number(instance_name, "timeout ms", 10000);
tmp_instance->config.timeoutms =
exporter_get_number(instance_name, "timeout ms", 10000);
tmp_instance->config.charts_pattern =
simple_pattern_create(exporter_get(instance_name, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
tmp_instance->config.charts_pattern = simple_pattern_create(
exporter_get(instance_name, "send charts matching", "*"),
NULL,
SIMPLE_PATTERN_EXACT);
tmp_instance->config.hosts_pattern = simple_pattern_create(
exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
exporter_get(instance_name, "send hosts matching", "localhost *"),
NULL,
SIMPLE_PATTERN_EXACT);
char *data_source = exporter_get(instance_name, "data source", "average");
char *data_source =
exporter_get(instance_name, "data source", "average");
tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options);
if (exporter_get_boolean(instance_name, "send configured labels", CONFIG_BOOLEAN_YES))
if (exporter_get_boolean(
instance_name, "send configured labels", CONFIG_BOOLEAN_YES))
tmp_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
else
tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
if (exporter_get_boolean(instance_name, "send automatic labels", CONFIG_BOOLEAN_NO))
if (exporter_get_boolean(
instance_name, "send automatic labels", CONFIG_BOOLEAN_NO))
tmp_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
else
tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
@ -362,30 +343,33 @@ struct engine *read_exporting_config()
else
tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
if (tmp_instance->config.type == BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE) {
struct prometheus_remote_write_specific_config *connector_specific_config =
callocz(1, sizeof(struct prometheus_remote_write_specific_config));
tmp_instance->config.connector_specific_config = connector_specific_config;
connector_specific_config->remote_write_path =
strdupz(exporter_get(instance_name, "remote write URL path", "/receive"));
connector_specific_config->remote_write_path = strdupz(exporter_get(
instance_name, "remote write URL path", "/receive"));
}
if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
if (tmp_instance->config.type == BACKEND_TYPE_KINESIS) {
struct aws_kinesis_specific_config *connector_specific_config =
callocz(1, sizeof(struct aws_kinesis_specific_config));
tmp_instance->config.connector_specific_config = connector_specific_config;
connector_specific_config->stream_name = strdupz(exporter_get(instance_name, "stream name", "netdata"));
connector_specific_config->stream_name = strdupz(exporter_get(
instance_name, "stream name", "netdata"));
connector_specific_config->auth_key_id = strdupz(exporter_get(instance_name, "aws_access_key_id", ""));
connector_specific_config->auth_key_id = strdupz(exporter_get(
instance_name, "aws_access_key_id", ""));
connector_specific_config->secure_key = strdupz(exporter_get(instance_name, "aws_secret_access_key", ""));
connector_specific_config->secure_key = strdupz(exporter_get(
instance_name, "aws_secret_access_key", ""));
}
if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
if (tmp_instance->config.type == BACKEND_TYPE_MONGODB) {
struct mongodb_specific_config *connector_specific_config =
callocz(1, sizeof(struct mongodb_specific_config));
@ -409,13 +393,14 @@ struct engine *read_exporting_config()
#endif
if (unlikely(!exporting_config_exists) && !engine->config.hostname) {
engine->config.hostname = strdupz(config_get(instance_name, "hostname", netdata_configured_hostname));
engine->config.hostname =
strdupz(config_get(instance_name, "hostname", netdata_configured_hostname));
engine->config.prefix = strdupz(config_get(instance_name, "prefix", "netdata"));
engine->config.update_every =
config_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
}
next_connector_instance:
next_connector_instance:
tmp_ci_list1 = tmp_ci_list->next;
freez(tmp_ci_list);
tmp_ci_list = tmp_ci_list1;

View file

@ -57,8 +57,8 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
if (likely(r > 0)) {
// we received some data
response->len += r;
stats->received_bytes += r;
stats->receptions++;
stats->chart_received_bytes += r;
stats->chart_receptions++;
} else if (r == 0) {
error("EXPORTING: '%s' closed the socket", instance->config.destination);
close(*sock);
@ -109,9 +109,9 @@ void simple_connector_send_buffer(int *sock, int *failures, struct instance *ins
if(written != -1 && (size_t)written == len) {
// we sent the data successfully
stats->transmission_successes++;
stats->sent_bytes += written;
stats->sent_metrics = stats->buffered_metrics;
stats->chart_transmission_successes++;
stats->chart_sent_bytes += written;
stats->chart_sent_metrics = stats->chart_buffered_metrics;
// reset the failures count
*failures = 0;
@ -126,10 +126,10 @@ void simple_connector_send_buffer(int *sock, int *failures, struct instance *ins
instance->config.destination,
len,
written);
stats->transmission_failures++;
stats->chart_transmission_failures++;
if(written != -1)
stats->sent_bytes += written;
stats->chart_sent_bytes += written;
// increment the counter we check for data loss
(*failures)++;
@ -160,19 +160,6 @@ void simple_connector_worker(void *instance_p)
int failures = 0;
while(!netdata_exit) {
// reset the monitoring chart counters
stats->received_bytes =
stats->sent_bytes =
stats->sent_metrics =
stats->lost_metrics =
stats->receptions =
stats->transmission_successes =
stats->transmission_failures =
stats->data_lost_events =
stats->lost_bytes =
stats->reconnects = 0;
// ------------------------------------------------------------------------
// if we are connected, receive a response, without blocking
@ -192,7 +179,7 @@ void simple_connector_worker(void *instance_p)
&reconnects,
NULL,
0);
stats->reconnects += reconnects;
stats->chart_reconnects += reconnects;
}
if(unlikely(netdata_exit)) break;
@ -207,31 +194,12 @@ void simple_connector_worker(void *instance_p)
simple_connector_send_buffer(&sock, &failures, instance);
} else {
error("EXPORTING: failed to update '%s'", instance->config.destination);
stats->transmission_failures++;
stats->chart_transmission_failures++;
// increment the counter we check for data loss
failures++;
}
BUFFER *buffer = instance->buffer;
if (failures > instance->config.buffer_on_failures) {
stats->lost_bytes += buffer_strlen(buffer);
error(
"EXPORTING: connector instance %s reached %d exporting failures. "
"Flushing buffers to protect this host - this results in data loss on server '%s'",
instance->config.name, failures, instance->config.destination);
buffer_flush(buffer);
failures = 0;
stats->data_lost_events++;
stats->lost_metrics = stats->buffered_metrics;
}
send_internal_metrics(instance);
if(likely(buffer_strlen(buffer) == 0))
stats->buffered_metrics = 0;
uv_mutex_unlock(&instance->mutex);
#ifdef UNIT_TESTING

View file

@ -3,170 +3,16 @@
#include "exporting_engine.h"
/**
* Create a chart for the main exporting thread CPU usage
*
* @param st_rusage the thead CPU usage chart
* @param rd_user a dimension for user CPU usage
* @param rd_system a dimension for system CPU usage
*/
void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
{
if (*st_rusage && *rd_user && *rd_system)
return;
*st_rusage = rrdset_create_localhost(
"netdata", "exporting_main_thread_cpu", NULL, "exporting", NULL, "Netdata Main Exporting Thread CPU Usage",
"milliseconds/s", "exporting", NULL, 130600, localhost->rrd_update_every, RRDSET_TYPE_STACKED);
*rd_user = rrddim_add(*st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
*rd_system = rrddim_add(*st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
}
/**
* Send the main exporting thread CPU usage
*
* @param st_rusage a thead CPU usage chart
* @param rd_user a dimension for user CPU usage
* @param rd_system a dimension for system CPU usage
*/
void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
{
struct rusage thread;
getrusage(RUSAGE_THREAD, &thread);
if (likely(st_rusage->counter_done))
rrdset_next(st_rusage);
rrddim_set_by_pointer(st_rusage, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
rrddim_set_by_pointer(st_rusage, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
rrdset_done(st_rusage);
}
/**
* Send internal metrics for an instance
* Send internal metrics
*
* Send performance metrics for the operation of exporting engine itself to the Netdata database.
*
* @param instance an instance data structure.
* @param engine an engine data structure.
* @return Returns 0 on success, 1 on failure.
*/
void send_internal_metrics(struct instance *instance)
int send_internal_metrics(struct engine *engine)
{
struct stats *stats = &instance->stats;
(void)engine;
// ------------------------------------------------------------------------
// create charts for monitoring the exporting operations
if (!stats->initialized) {
char id[RRD_ID_LENGTH_MAX + 1];
BUFFER *family = buffer_create(0);
buffer_sprintf(family, "exporting_%s", instance->config.name);
snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_metrics", instance->config.name);
netdata_fix_chart_id(id);
stats->st_metrics = rrdset_create_localhost(
"netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Buffered Metrics", "metrics", "exporting", NULL,
130610, instance->config.update_every, RRDSET_TYPE_LINE);
stats->rd_buffered_metrics = rrddim_add(stats->st_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
stats->rd_lost_metrics = rrddim_add(stats->st_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
stats->rd_sent_metrics = rrddim_add(stats->st_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
// ------------------------------------------------------------------------
snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_bytes", instance->config.name);
netdata_fix_chart_id(id);
stats->st_bytes = rrdset_create_localhost(
"netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Exporting Data Size", "KiB", "exporting", NULL,
130620, instance->config.update_every, RRDSET_TYPE_AREA);
stats->rd_buffered_bytes = rrddim_add(stats->st_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
stats->rd_lost_bytes = rrddim_add(stats->st_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
stats->rd_sent_bytes = rrddim_add(stats->st_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
stats->rd_received_bytes = rrddim_add(stats->st_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
// ------------------------------------------------------------------------
snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_ops", instance->config.name);
netdata_fix_chart_id(id);
stats->st_ops = rrdset_create_localhost(
"netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Exporting Operations", "operations", "exporting",
NULL, 130630, instance->config.update_every, RRDSET_TYPE_LINE);
stats->rd_transmission_successes = rrddim_add(stats->st_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
stats->rd_data_lost_events = rrddim_add(stats->st_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
stats->rd_reconnects = rrddim_add(stats->st_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
stats->rd_transmission_failures = rrddim_add(stats->st_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
stats->rd_receptions = rrddim_add(stats->st_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
// ------------------------------------------------------------------------
snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_thread_cpu", instance->config.name);
netdata_fix_chart_id(id);
stats->st_rusage = rrdset_create_localhost(
"netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Exporting Instance Thread CPU Usage",
"milliseconds/s", "exporting", NULL, 130640, instance->config.update_every, RRDSET_TYPE_STACKED);
stats->rd_user = rrddim_add(stats->st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
stats->rd_system = rrddim_add(stats->st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
buffer_free(family);
stats->initialized = 1;
}
// ------------------------------------------------------------------------
// update the monitoring charts
if (likely(stats->st_metrics->counter_done))
rrdset_next(stats->st_metrics);
rrddim_set_by_pointer(stats->st_metrics, stats->rd_buffered_metrics, stats->buffered_metrics);
rrddim_set_by_pointer(stats->st_metrics, stats->rd_lost_metrics, stats->lost_metrics);
rrddim_set_by_pointer(stats->st_metrics, stats->rd_sent_metrics, stats->sent_metrics);
rrdset_done(stats->st_metrics);
// ------------------------------------------------------------------------
if (likely(stats->st_bytes->counter_done))
rrdset_next(stats->st_bytes);
rrddim_set_by_pointer(stats->st_bytes, stats->rd_buffered_bytes, stats->buffered_bytes);
rrddim_set_by_pointer(stats->st_bytes, stats->rd_lost_bytes, stats->lost_bytes);
rrddim_set_by_pointer(stats->st_bytes, stats->rd_sent_bytes, stats->sent_bytes);
rrddim_set_by_pointer(stats->st_bytes, stats->rd_received_bytes, stats->received_bytes);
rrdset_done(stats->st_bytes);
// ------------------------------------------------------------------------
if (likely(stats->st_ops->counter_done))
rrdset_next(stats->st_ops);
rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_successes, stats->transmission_successes);
rrddim_set_by_pointer(stats->st_ops, stats->rd_data_lost_events, stats->data_lost_events);
rrddim_set_by_pointer(stats->st_ops, stats->rd_reconnects, stats->reconnects);
rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_failures, stats->transmission_failures);
rrddim_set_by_pointer(stats->st_ops, stats->rd_receptions, stats->receptions);
rrdset_done(stats->st_ops);
// ------------------------------------------------------------------------
struct rusage thread;
getrusage(RUSAGE_THREAD, &thread);
if (likely(stats->st_rusage->counter_done))
rrdset_next(stats->st_rusage);
rrddim_set_by_pointer(stats->st_rusage, stats->rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
rrddim_set_by_pointer(stats->st_rusage, stats->rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
rrdset_done(stats->st_rusage);
return 0;
}

View file

@ -20,7 +20,7 @@ struct engine *__mock_read_exporting_config()
engine->instance_root = calloc(1, sizeof(struct instance));
struct instance *instance = engine->instance_root;
instance->engine = engine;
instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE;
instance->config.type = BACKEND_TYPE_GRAPHITE;
instance->config.name = strdupz("instance_name");
instance->config.destination = strdupz("localhost");
instance->config.update_every = 1;
@ -82,26 +82,10 @@ int __wrap_notify_workers(struct engine *engine)
return mock_type(int);
}
// cmocka mock for create_main_rusage_chart() (linked in via ld --wrap):
// records the call and verifies the three out-pointer arguments against the
// expectations the test queued with expect_*().
// Fix: a stray duplicated prototype line for __wrap_send_internal_metrics()
// was wedged between this signature and its body, which is syntactically
// invalid; the real __wrap_send_internal_metrics() is defined separately.
void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
{
    function_called();
    check_expected_ptr(st_rusage);
    check_expected_ptr(rd_user);
    check_expected_ptr(rd_system);
}
// cmocka mock for send_main_rusage() (linked in via ld --wrap): records the
// call and verifies the chart and dimension pointers against the test's
// queued expectations; it performs no actual chart update.
void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
{
    function_called();
    check_expected_ptr(st_rusage);
    check_expected_ptr(rd_user);
    check_expected_ptr(rd_system);
}
// cmocka mock for send_internal_metrics() (linked in via ld --wrap): verifies
// the instance argument and returns whatever result code the test supplied
// through will_return().
// Fix: removed `check_expected_ptr(engine);` — `engine` is not in scope here
// (the parameter is `instance`), so the line could not compile.
int __wrap_send_internal_metrics(struct instance *instance)
{
    function_called();
    check_expected_ptr(instance);
    return mock_type(int);
}

View file

@ -127,35 +127,3 @@ int teardown_initialized_engine(void **state)
return 0;
}
// cmocka group/test fixture: allocates the global prometheus_exporter_instance
// and a dummy localhost host (via setup_rrdhost()) so the Prometheus web API
// tests can format metrics.  Returns 0 on success, per cmocka convention.
int setup_prometheus(void **state)
{
    (void)state;

    prometheus_exporter_instance = calloc(1, sizeof(struct instance));

    setup_rrdhost();

    prometheus_exporter_instance->config.update_every = 10;

    // Send dimension names plus both configured and automatic host labels.
    prometheus_exporter_instance->config.options |=
        EXPORTING_OPTION_SEND_NAMES | EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;

    // Patterns matching every chart and every host.
    prometheus_exporter_instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
    prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);

    return 0;
}
// cmocka fixture teardown: releases everything setup_prometheus() allocated —
// the dummy host, both simple patterns, and the instance itself.
int teardown_prometheus(void **state)
{
    (void)state;

    teardown_rrdhost();
    simple_pattern_free(prometheus_exporter_instance->config.charts_pattern);
    simple_pattern_free(prometheus_exporter_instance->config.hosts_pattern);
    free(prometheus_exporter_instance);

    return 0;
}

View file

@ -85,92 +85,6 @@ void __rrd_check_rdlock(const char *file, const char *function, const unsigned l
(void)line;
}
// Test stub replacing the real rrdset_create_custom(): instead of creating a
// chart it verifies each interesting argument against the expectations the
// test queued with expect_*(), then returns the RRDSET the test supplied via
// will_return().  title, memory_mode and history_entries are deliberately
// not checked.
RRDSET *rrdset_create_custom(
    RRDHOST *host,
    const char *type,
    const char *id,
    const char *name,
    const char *family,
    const char *context,
    const char *title,
    const char *units,
    const char *plugin,
    const char *module,
    long priority,
    int update_every,
    RRDSET_TYPE chart_type,
    RRD_MEMORY_MODE memory_mode,
    long history_entries)
{
    check_expected_ptr(host);
    check_expected_ptr(type);
    check_expected_ptr(id);
    check_expected_ptr(name);
    check_expected_ptr(family);
    check_expected_ptr(context);
    UNUSED(title);
    check_expected_ptr(units);
    check_expected_ptr(plugin);
    check_expected_ptr(module);
    check_expected(priority);
    check_expected(update_every);
    check_expected(chart_type);
    UNUSED(memory_mode);
    UNUSED(history_entries);
    function_called();
    // The chart object handed back here is a bare calloc'd RRDSET owned by
    // the test, not a real chart.
    return mock_ptr_type(RRDSET *);
}
// Test stub replacing rrdset_next_usec(): only verifies which chart was
// advanced; the elapsed-time argument is irrelevant to these tests.
void rrdset_next_usec(RRDSET *st, usec_t microseconds)
{
    check_expected_ptr(st);
    UNUSED(microseconds);
    function_called();
}
// Test stub replacing rrdset_done(): records the call and verifies which
// chart was completed; no data is actually committed.
void rrdset_done(RRDSET *st)
{
    check_expected_ptr(st);
    function_called();
}
// Test stub replacing rrddim_add_custom(): verifies the dimension parameters
// (except id and memory_mode) against queued expectations.  Always returns
// NULL — safe here because the rrddim_set_by_pointer() stub below ignores
// the dimension handle.
RRDDIM *rrddim_add_custom(
    RRDSET *st,
    const char *id,
    const char *name,
    collected_number multiplier,
    collected_number divisor,
    RRD_ALGORITHM algorithm,
    RRD_MEMORY_MODE memory_mode)
{
    check_expected_ptr(st);
    UNUSED(id);
    check_expected_ptr(name);
    check_expected(multiplier);
    check_expected(divisor);
    check_expected(algorithm);
    UNUSED(memory_mode);
    function_called();
    return NULL;
}
// Test stub replacing rrddim_set_by_pointer(): only checks which chart the
// value was stored into; the dimension (always NULL from the stub above) and
// the value itself are ignored.  Returns 0 as the "last stored value".
collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
{
    check_expected_ptr(st);
    UNUSED(rd);
    UNUSED(value);
    function_called();
    return 0;
}
const char *rrd_memory_mode_name(RRD_MEMORY_MODE id)
{
(void)id;

View file

@ -44,11 +44,6 @@ static void test_exporting_engine(void **state)
expect_memory(__wrap_init_connectors, engine, engine, sizeof(struct engine));
will_return(__wrap_init_connectors, 0);
expect_function_call(__wrap_create_main_rusage_chart);
expect_not_value(__wrap_create_main_rusage_chart, st_rusage, NULL);
expect_not_value(__wrap_create_main_rusage_chart, rd_user, NULL);
expect_not_value(__wrap_create_main_rusage_chart, rd_system, NULL);
expect_function_call(__wrap_now_realtime_sec);
will_return(__wrap_now_realtime_sec, 2);
@ -64,10 +59,9 @@ static void test_exporting_engine(void **state)
expect_memory(__wrap_notify_workers, engine, engine, sizeof(struct engine));
will_return(__wrap_notify_workers, 0);
expect_function_call(__wrap_send_main_rusage);
expect_value(__wrap_send_main_rusage, st_rusage, NULL);
expect_value(__wrap_send_main_rusage, rd_user, NULL);
expect_value(__wrap_send_main_rusage, rd_system, NULL);
expect_function_call(__wrap_send_internal_metrics);
expect_memory(__wrap_send_internal_metrics, engine, engine, sizeof(struct engine));
will_return(__wrap_send_internal_metrics, 0);
expect_function_call(__wrap_info_int);
@ -93,7 +87,7 @@ static void test_read_exporting_config(void **state)
assert_ptr_not_equal(instance, NULL);
assert_ptr_equal(instance->next, NULL);
assert_ptr_equal(instance->engine, engine);
assert_int_equal(instance->config.type, EXPORTING_CONNECTOR_TYPE_GRAPHITE);
assert_int_equal(instance->config.type, BACKEND_TYPE_GRAPHITE);
assert_string_equal(instance->config.destination, "localhost");
assert_int_equal(instance->config.update_every, 1);
assert_int_equal(instance->config.buffer_on_failures, 10);
@ -128,7 +122,7 @@ static void test_init_connectors(void **state)
assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
assert_ptr_equal(instance->end_chart_formatting, NULL);
assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
assert_ptr_equal(instance->end_batch_formatting, simple_connector_update_buffered_bytes);
assert_ptr_equal(instance->end_batch_formatting, NULL);
BUFFER *buffer = instance->buffer;
assert_ptr_not_equal(buffer, NULL);
@ -390,7 +384,7 @@ static void test_prepare_buffers(void **state)
assert_int_equal(__real_prepare_buffers(engine), 0);
assert_int_equal(instance->stats.buffered_metrics, 1);
assert_int_equal(instance->stats.chart_buffered_metrics, 1);
// check with NULL functions
instance->start_batch_formatting = NULL;
@ -579,8 +573,8 @@ static void test_simple_connector_receive_response(void **state)
log_line,
"EXPORTING: received 9 bytes from instance_name connector instance. Ignoring them. Sample: 'Test recv'");
assert_int_equal(stats->received_bytes, 9);
assert_int_equal(stats->receptions, 1);
assert_int_equal(stats->chart_received_bytes, 9);
assert_int_equal(stats->chart_receptions, 1);
assert_int_equal(sock, 1);
}
@ -620,10 +614,10 @@ static void test_simple_connector_send_buffer(void **state)
simple_connector_send_buffer(&sock, &failures, instance);
assert_int_equal(failures, 0);
assert_int_equal(stats->transmission_successes, 1);
assert_int_equal(stats->sent_bytes, 84);
assert_int_equal(stats->sent_metrics, 1);
assert_int_equal(stats->transmission_failures, 0);
assert_int_equal(stats->chart_transmission_successes, 1);
assert_int_equal(stats->chart_sent_bytes, 84);
assert_int_equal(stats->chart_sent_metrics, 1);
assert_int_equal(stats->chart_transmission_failures, 0);
assert_int_equal(buffer_strlen(buffer), 0);
@ -634,7 +628,6 @@ static void test_simple_connector_worker(void **state)
{
struct engine *engine = *state;
struct instance *instance = engine->instance_root;
struct stats *stats = &instance->stats;
BUFFER *buffer = instance->buffer;
__real_mark_scheduled_instances(engine);
@ -668,24 +661,7 @@ static void test_simple_connector_worker(void **state)
expect_value(__wrap_send, len, 84);
expect_value(__wrap_send, flags, MSG_NOSIGNAL);
expect_function_call(__wrap_send_internal_metrics);
expect_value(__wrap_send_internal_metrics, instance, instance);
will_return(__wrap_send_internal_metrics, 0);
simple_connector_worker(instance);
assert_int_equal(stats->buffered_metrics, 0);
assert_int_equal(stats->buffered_bytes, 84);
assert_int_equal(stats->received_bytes, 0);
assert_int_equal(stats->sent_bytes, 84);
assert_int_equal(stats->sent_metrics, 1);
assert_int_equal(stats->lost_metrics, 0);
assert_int_equal(stats->receptions, 0);
assert_int_equal(stats->transmission_successes, 1);
assert_int_equal(stats->transmission_failures, 0);
assert_int_equal(stats->data_lost_events, 0);
assert_int_equal(stats->lost_bytes, 0);
assert_int_equal(stats->reconnects, 0);
}
static void test_sanitize_json_string(void **state)
@ -785,367 +761,6 @@ static void test_flush_host_labels(void **state)
assert_int_equal(buffer_strlen(instance->labels), 0);
}
// Verifies that create_main_rusage_chart() creates the main-thread CPU chart
// ("netdata.exporting_main_thread_cpu", priority 130600, stacked) with two
// incremental millisecond dimensions (user + system), by queuing cmocka
// expectations on the rrdset/rrddim stubs and then calling the real function.
static void test_create_main_rusage_chart(void **state)
{
    UNUSED(state);

    RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
    RRDDIM *rd_user = NULL;
    RRDDIM *rd_system = NULL;

    // Expect exactly one chart creation with these parameters.
    expect_function_call(rrdset_create_custom);
    expect_value(rrdset_create_custom, host, localhost);
    expect_string(rrdset_create_custom, type, "netdata");
    expect_string(rrdset_create_custom, id, "exporting_main_thread_cpu");
    expect_value(rrdset_create_custom, name, NULL);
    expect_string(rrdset_create_custom, family, "exporting");
    expect_value(rrdset_create_custom, context, NULL);
    expect_string(rrdset_create_custom, units, "milliseconds/s");
    expect_string(rrdset_create_custom, plugin, "exporting");
    expect_value(rrdset_create_custom, module, NULL);
    expect_value(rrdset_create_custom, priority, 130600);
    expect_value(rrdset_create_custom, update_every, localhost->rrd_update_every);
    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
    will_return(rrdset_create_custom, st_rusage);

    // Two dimensions (user/system), both incremental with divisor 1000.
    expect_function_calls(rrddim_add_custom, 2);
    expect_value_count(rrddim_add_custom, st, st_rusage, 2);
    expect_value_count(rrddim_add_custom, name, NULL, 2);
    expect_value_count(rrddim_add_custom, multiplier, 1, 2);
    expect_value_count(rrddim_add_custom, divisor, 1000, 2);
    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);

    __real_create_main_rusage_chart(&st_rusage, &rd_user, &rd_system);

    free(st_rusage);
}
// Verifies that send_main_rusage() advances the rusage chart, stores both
// dimensions (user and system CPU time), and completes the chart — one
// rrdset_next_usec, two rrddim_set_by_pointer, one rrdset_done.
static void test_send_main_rusage(void **state)
{
    UNUSED(state);

    RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
    // counter_done != 0 makes send_main_rusage() take the rrdset_next path.
    st_rusage->counter_done = 1;

    expect_function_call(rrdset_next_usec);
    expect_value(rrdset_next_usec, st, st_rusage);

    expect_function_calls(rrddim_set_by_pointer, 2);
    expect_value_count(rrddim_set_by_pointer, st, st_rusage, 2);

    expect_function_call(rrdset_done);
    expect_value(rrdset_done, st, st_rusage);

    __real_send_main_rusage(st_rusage, NULL, NULL);

    free(st_rusage);
}
// End-to-end check of send_internal_metrics() for one exporting instance:
// on first use it must create four monitoring charts (metrics, bytes,
// operations, thread CPU) with the expected ids, priorities and dimension
// counts, then update all four.  Chart objects are pre-allocated here and
// fed back through the rrdset_create_custom stub.
static void test_send_internal_metrics(void **state)
{
    UNUSED(state);

    struct instance *instance = calloc(1, sizeof(struct instance));
    instance->config.name = (const char *)strdupz("test_instance");
    instance->config.update_every = 2;

    struct stats *stats = &instance->stats;
    // counter_done != 0 on each chart so the update path calls rrdset_next.
    stats->st_metrics = calloc(1, sizeof(RRDSET));
    stats->st_metrics->counter_done = 1;
    stats->st_bytes = calloc(1, sizeof(RRDSET));
    stats->st_bytes->counter_done = 1;
    stats->st_ops = calloc(1, sizeof(RRDSET));
    stats->st_ops->counter_done = 1;
    stats->st_rusage = calloc(1, sizeof(RRDSET));
    stats->st_rusage->counter_done = 1;

    // ------------------------------------------------------------------------
    // metrics chart: 3 absolute dimensions, priority 130610
    expect_function_call(rrdset_create_custom);
    expect_value(rrdset_create_custom, host, localhost);
    expect_string(rrdset_create_custom, type, "netdata");
    expect_string(rrdset_create_custom, id, "exporting_test_instance_metrics");
    expect_value(rrdset_create_custom, name, NULL);
    expect_string(rrdset_create_custom, family, "exporting_test_instance");
    expect_value(rrdset_create_custom, context, NULL);
    expect_string(rrdset_create_custom, units, "metrics");
    expect_string(rrdset_create_custom, plugin, "exporting");
    expect_value(rrdset_create_custom, module, NULL);
    expect_value(rrdset_create_custom, priority, 130610);
    expect_value(rrdset_create_custom, update_every, 2);
    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
    will_return(rrdset_create_custom, stats->st_metrics);

    expect_function_calls(rrddim_add_custom, 3);
    expect_value_count(rrddim_add_custom, st, stats->st_metrics, 3);
    expect_value_count(rrddim_add_custom, name, NULL, 3);
    expect_value_count(rrddim_add_custom, multiplier, 1, 3);
    expect_value_count(rrddim_add_custom, divisor, 1, 3);
    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 3);

    // ------------------------------------------------------------------------
    // bytes chart: 4 absolute dimensions in KiB, priority 130620
    expect_function_call(rrdset_create_custom);
    expect_value(rrdset_create_custom, host, localhost);
    expect_string(rrdset_create_custom, type, "netdata");
    expect_string(rrdset_create_custom, id, "exporting_test_instance_bytes");
    expect_value(rrdset_create_custom, name, NULL);
    expect_string(rrdset_create_custom, family, "exporting_test_instance");
    expect_value(rrdset_create_custom, context, NULL);
    expect_string(rrdset_create_custom, units, "KiB");
    expect_string(rrdset_create_custom, plugin, "exporting");
    expect_value(rrdset_create_custom, module, NULL);
    expect_value(rrdset_create_custom, priority, 130620);
    expect_value(rrdset_create_custom, update_every, 2);
    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_AREA);
    will_return(rrdset_create_custom, stats->st_bytes);

    expect_function_calls(rrddim_add_custom, 4);
    expect_value_count(rrddim_add_custom, st, stats->st_bytes, 4);
    expect_value_count(rrddim_add_custom, name, NULL, 4);
    expect_value_count(rrddim_add_custom, multiplier, 1, 4);
    expect_value_count(rrddim_add_custom, divisor, 1024, 4);
    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 4);

    // ------------------------------------------------------------------------
    // operations chart: 5 absolute dimensions, priority 130630
    expect_function_call(rrdset_create_custom);
    expect_value(rrdset_create_custom, host, localhost);
    expect_string(rrdset_create_custom, type, "netdata");
    expect_string(rrdset_create_custom, id, "exporting_test_instance_ops");
    expect_value(rrdset_create_custom, name, NULL);
    expect_string(rrdset_create_custom, family, "exporting_test_instance");
    expect_value(rrdset_create_custom, context, NULL);
    expect_string(rrdset_create_custom, units, "operations");
    expect_string(rrdset_create_custom, plugin, "exporting");
    expect_value(rrdset_create_custom, module, NULL);
    expect_value(rrdset_create_custom, priority, 130630);
    expect_value(rrdset_create_custom, update_every, 2);
    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
    will_return(rrdset_create_custom, stats->st_ops);

    expect_function_calls(rrddim_add_custom, 5);
    expect_value_count(rrddim_add_custom, st, stats->st_ops, 5);
    expect_value_count(rrddim_add_custom, name, NULL, 5);
    expect_value_count(rrddim_add_custom, multiplier, 1, 5);
    expect_value_count(rrddim_add_custom, divisor, 1, 5);
    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 5);

    // ------------------------------------------------------------------------
    // per-instance thread CPU chart: 2 incremental dimensions, priority 130640
    expect_function_call(rrdset_create_custom);
    expect_value(rrdset_create_custom, host, localhost);
    expect_string(rrdset_create_custom, type, "netdata");
    expect_string(rrdset_create_custom, id, "exporting_test_instance_thread_cpu");
    expect_value(rrdset_create_custom, name, NULL);
    expect_string(rrdset_create_custom, family, "exporting_test_instance");
    expect_value(rrdset_create_custom, context, NULL);
    expect_string(rrdset_create_custom, units, "milliseconds/s");
    expect_string(rrdset_create_custom, plugin, "exporting");
    expect_value(rrdset_create_custom, module, NULL);
    expect_value(rrdset_create_custom, priority, 130640);
    expect_value(rrdset_create_custom, update_every, 2);
    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
    will_return(rrdset_create_custom, stats->st_rusage);

    expect_function_calls(rrddim_add_custom, 2);
    expect_value_count(rrddim_add_custom, st, stats->st_rusage, 2);
    expect_value_count(rrddim_add_custom, name, NULL, 2);
    expect_value_count(rrddim_add_custom, multiplier, 1, 2);
    expect_value_count(rrddim_add_custom, divisor, 1000, 2);
    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);

    // ------------------------------------------------------------------------
    // update phase: each chart is advanced, its dimensions set, and completed
    expect_function_call(rrdset_next_usec);
    expect_value(rrdset_next_usec, st, stats->st_metrics);
    expect_function_calls(rrddim_set_by_pointer, 3);
    expect_value_count(rrddim_set_by_pointer, st, stats->st_metrics, 3);
    expect_function_call(rrdset_done);
    expect_value(rrdset_done, st, stats->st_metrics);

    // ------------------------------------------------------------------------
    expect_function_call(rrdset_next_usec);
    expect_value(rrdset_next_usec, st, stats->st_bytes);
    expect_function_calls(rrddim_set_by_pointer, 4);
    expect_value_count(rrddim_set_by_pointer, st, stats->st_bytes, 4);
    expect_function_call(rrdset_done);
    expect_value(rrdset_done, st, stats->st_bytes);

    // ------------------------------------------------------------------------
    expect_function_call(rrdset_next_usec);
    expect_value(rrdset_next_usec, st, stats->st_ops);
    expect_function_calls(rrddim_set_by_pointer, 5);
    expect_value_count(rrddim_set_by_pointer, st, stats->st_ops, 5);
    expect_function_call(rrdset_done);
    expect_value(rrdset_done, st, stats->st_ops);

    // ------------------------------------------------------------------------
    expect_function_call(rrdset_next_usec);
    expect_value(rrdset_next_usec, st, stats->st_rusage);
    expect_function_calls(rrddim_set_by_pointer, 2);
    expect_value_count(rrddim_set_by_pointer, st, stats->st_rusage, 2);
    expect_function_call(rrdset_done);
    expect_value(rrdset_done, st, stats->st_rusage);

    // ------------------------------------------------------------------------
    __real_send_internal_metrics(instance);

    free(stats->st_metrics);
    free(stats->st_bytes);
    free(stats->st_ops);
    free(stats->st_rusage);
    free((void *)instance->config.name);
    free(instance);
}
// Exercises can_send_rrdset() filtering: a plain chart is sendable; a chart
// flagged BACKEND_IGNORE or OBSOLETE is not; and a chart with memory mode
// NONE is rejected when the instance exports averaged (stored) data.
static void test_can_send_rrdset(void **state)
{
    (void)*state;

    // baseline: nothing set, the chart may be sent
    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 1);

    // explicitly ignored charts are filtered out
    rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
    rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);

    // TODO: test with a denying simple pattern

    // obsolete charts are filtered out
    rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
    rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);

    // averaged-data export needs stored data, so memory mode NONE is rejected
    localhost->rrdset_root->rrd_memory_mode = RRD_MEMORY_MODE_NONE;
    prometheus_exporter_instance->config.options |= EXPORTING_SOURCE_DATA_AVERAGE;
    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
}
// prometheus_name_copy() must sanitize metric names: characters that are not
// valid in a Prometheus identifier (here '-') become '_'.
static void test_prometheus_name_copy(void **state)
{
    (void)*state;

    char copied[PROMETHEUS_ELEMENT_MAX + 1];
    int copied_length = prometheus_name_copy(copied, "test-name", PROMETHEUS_ELEMENT_MAX);

    assert_int_equal(copied_length, 9);
    assert_string_equal(copied, "test_name");
}
// prometheus_label_copy() must escape label values: double quotes,
// backslashes and newlines come back backslash-escaped, and the returned
// length counts the escaped form.
static void test_prometheus_label_copy(void **state)
{
    (void)*state;

    char escaped[PROMETHEUS_ELEMENT_MAX + 1];
    int escaped_length = prometheus_label_copy(escaped, "test\"\\\nlabel", PROMETHEUS_ELEMENT_MAX);

    assert_int_equal(escaped_length, 15);
    assert_string_equal(escaped, "test\\\"\\\\\\\nlabel");
}
// prometheus_units_copy() must normalize unit strings for use as metric name
// suffixes: '-' becomes '_', '%' becomes "_percent", "/s" becomes "_persec",
// and with the last argument set, "KiB" maps to "_KB".
static void test_prometheus_units_copy(void **state)
{
    (void)*state;

    char units[PROMETHEUS_ELEMENT_MAX + 1];

    // The function returns its destination buffer, so both views must agree.
    const char *returned = prometheus_units_copy(units, "test-units", PROMETHEUS_ELEMENT_MAX, 0);
    assert_string_equal(returned, "_test_units");
    assert_string_equal(units, "_test_units");

    assert_string_equal(prometheus_units_copy(units, "%", PROMETHEUS_ELEMENT_MAX, 0), "_percent");
    assert_string_equal(prometheus_units_copy(units, "test-units/s", PROMETHEUS_ELEMENT_MAX, 0), "_test_units_persec");

    // Fourth argument non-zero: IEC "KiB" is rendered as "_KB".
    assert_string_equal(prometheus_units_copy(units, "KiB", PROMETHEUS_ELEMENT_MAX, 1), "_KB");
}
// format_host_labels_prometheus() must render the host's labels as a
// comma-separated key="value" list when both configured and automatic
// labels are enabled on the instance.
static void test_format_host_labels_prometheus(void **state)
{
    struct engine *engine = *state;
    struct instance *exporting_instance = engine->instance_root;

    // Enable both label sources with a single mask update.
    exporting_instance->config.options |=
        EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;

    format_host_labels_prometheus(exporting_instance, localhost);

    assert_string_equal(buffer_tostring(exporting_instance->labels), "key1=\"netdata\",key2=\"value2\"");
}
// Golden-output test for the Prometheus /allmetrics web API formatters:
// renders the single-host output with default options, then with
// NAMES|TYPES, then the all-hosts output, comparing each against the exact
// expected exposition text.  The stored-data value 27 is injected through
// the wrapped exporting_calculate_value_from_stored_data().
static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
{
    (void)state;

    BUFFER *buffer = buffer_create(0);

    // give the test host and chart recognizable names for the output
    localhost->hostname = strdupz("test_hostname");
    localhost->rrdset_root->family = strdupz("test_family");
    localhost->rrdset_root->context = strdupz("test_context");

    expect_function_call(__wrap_now_realtime_sec);
    will_return(__wrap_now_realtime_sec, 2);

    expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
    will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));

    // default options: chart/dimension ids, no TYPE comments
    rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, buffer, "test_server", "test_prefix", 0, 0);

    assert_string_equal(
        buffer_tostring(buffer),
        "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
        "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
        "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
        "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\"} 690565856.0000000\n");

    buffer_flush(buffer);

    expect_function_call(__wrap_now_realtime_sec);
    will_return(__wrap_now_realtime_sec, 2);

    expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
    will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));

    // NAMES|TYPES: human-readable names plus "# COMMENT TYPE" annotations
    rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
        localhost, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES);

    assert_string_equal(
        buffer_tostring(buffer),
        "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
        "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
        "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
        "# COMMENT TYPE test_prefix_test_context gauge\n"
        "test_prefix_test_context{chart=\"chart_name\",family=\"test_family\",dimension=\"dimension_name\"} 690565856.0000000\n");

    buffer_flush(buffer);

    expect_function_call(__wrap_now_realtime_sec);
    will_return(__wrap_now_realtime_sec, 2);

    expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
    will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));

    // all-hosts variant: every line also carries an instance label
    rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, buffer, "test_server", "test_prefix", 0, 0);

    assert_string_equal(
        buffer_tostring(buffer),
        "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
        "netdata_host_tags_info{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
        "netdata_host_tags{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
        "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\",instance=\"test_hostname\"} 690565856.0000000\n");

    free(localhost->rrdset_root->context);
    free(localhost->rrdset_root->family);
    free(localhost->hostname);
    buffer_free(buffer);
}
#if ENABLE_PROMETHEUS_REMOTE_WRITE
static void test_init_prometheus_remote_write_instance(void **state)
{
@ -1399,7 +1014,6 @@ static void test_aws_kinesis_connector_worker(void **state)
{
struct engine *engine = *state;
struct instance *instance = engine->instance_root;
struct stats *stats = &instance->stats;
BUFFER *buffer = instance->buffer;
__real_mark_scheduled_instances(engine);
@ -1445,25 +1059,8 @@ static void test_aws_kinesis_connector_worker(void **state)
expect_not_value(__wrap_kinesis_get_result, lost_bytes, NULL);
will_return(__wrap_kinesis_get_result, 0);
expect_function_call(__wrap_send_internal_metrics);
expect_value(__wrap_send_internal_metrics, instance, instance);
will_return(__wrap_send_internal_metrics, 0);
aws_kinesis_connector_worker(instance);
assert_int_equal(stats->buffered_metrics, 0);
assert_int_equal(stats->buffered_bytes, 84);
assert_int_equal(stats->received_bytes, 0);
assert_int_equal(stats->sent_bytes, 84);
assert_int_equal(stats->sent_metrics, 1);
assert_int_equal(stats->lost_metrics, 0);
assert_int_equal(stats->receptions, 1);
assert_int_equal(stats->transmission_successes, 1);
assert_int_equal(stats->transmission_failures, 0);
assert_int_equal(stats->data_lost_events, 0);
assert_int_equal(stats->lost_bytes, 0);
assert_int_equal(stats->reconnects, 0);
free(connector_specific_config->stream_name);
free(connector_specific_config->auth_key_id);
free(connector_specific_config->secure_key);
@ -1573,7 +1170,7 @@ static void test_format_batch_mongodb(void **state)
BUFFER *buffer = buffer_create(0);
buffer_sprintf(buffer, "{ \"metric\": \"test_metric\" }\n");
instance->buffer = buffer;
stats->buffered_metrics = 1;
stats->chart_buffered_metrics = 1;
assert_int_equal(format_batch_mongodb(instance), 0);
@ -1624,10 +1221,6 @@ static void test_mongodb_connector_worker(void **state)
expect_not_value(__wrap_mongoc_collection_insert_many, error, NULL);
will_return(__wrap_mongoc_collection_insert_many, true);
expect_function_call(__wrap_send_internal_metrics);
expect_value(__wrap_send_internal_metrics, instance, instance);
will_return(__wrap_send_internal_metrics, 0);
mongodb_connector_worker(instance);
assert_ptr_equal(connector_specific_data->first_buffer->insert, NULL);
@ -1635,18 +1228,11 @@ static void test_mongodb_connector_worker(void **state)
assert_ptr_equal(connector_specific_data->first_buffer, connector_specific_data->first_buffer->next);
struct stats *stats = &instance->stats;
assert_int_equal(stats->buffered_metrics, 0);
assert_int_equal(stats->buffered_bytes, 0);
assert_int_equal(stats->received_bytes, 0);
assert_int_equal(stats->sent_bytes, 30);
assert_int_equal(stats->sent_metrics, 1);
assert_int_equal(stats->lost_metrics, 0);
assert_int_equal(stats->receptions, 1);
assert_int_equal(stats->transmission_successes, 1);
assert_int_equal(stats->transmission_failures, 0);
assert_int_equal(stats->data_lost_events, 0);
assert_int_equal(stats->lost_bytes, 0);
assert_int_equal(stats->reconnects, 0);
assert_int_equal(stats->chart_sent_bytes, 60);
assert_int_equal(stats->chart_transmission_successes, 1);
assert_int_equal(stats->chart_receptions, 1);
assert_int_equal(stats->chart_sent_bytes, 60);
assert_int_equal(stats->chart_sent_metrics, 0);
free(connector_specific_config->database);
free(connector_specific_config->collection);
@ -1725,27 +1311,6 @@ int main(void)
int test_res = cmocka_run_group_tests_name("exporting_engine", tests, NULL, NULL) +
cmocka_run_group_tests_name("labels_in_exporting_engine", label_tests, NULL, NULL);
const struct CMUnitTest internal_metrics_tests[] = {
cmocka_unit_test(test_create_main_rusage_chart),
cmocka_unit_test(test_send_main_rusage),
cmocka_unit_test(test_send_internal_metrics),
};
test_res += cmocka_run_group_tests_name("internal_metrics", internal_metrics_tests, NULL, NULL);
const struct CMUnitTest prometheus_web_api_tests[] = {
cmocka_unit_test_setup_teardown(test_can_send_rrdset, setup_prometheus, teardown_prometheus),
cmocka_unit_test_setup_teardown(test_prometheus_name_copy, setup_prometheus, teardown_prometheus),
cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus),
cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus),
cmocka_unit_test_setup_teardown(
test_format_host_labels_prometheus, setup_configured_engine, teardown_configured_engine),
cmocka_unit_test_setup_teardown(
rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus),
};
test_res += cmocka_run_group_tests_name("prometheus_web_api", prometheus_web_api_tests, NULL, NULL);
#if ENABLE_PROMETHEUS_REMOTE_WRITE
const struct CMUnitTest prometheus_remote_write_tests[] = {
cmocka_unit_test_setup_teardown(

View file

@ -91,14 +91,7 @@ int __wrap_prepare_buffers(struct engine *engine);
int __wrap_notify_workers(struct engine *engine);
void __real_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
void __real_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
int __real_send_internal_metrics(struct instance *instance);
int __wrap_send_internal_metrics(struct instance *instance);
int __wrap_send_internal_metrics(struct engine *engine);
int __real_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
@ -173,8 +166,6 @@ int setup_rrdhost();
int teardown_rrdhost();
int setup_initialized_engine(void **state);
int teardown_initialized_engine(void **state);
int setup_prometheus(void **state);
int teardown_prometheus(void **state);
void init_connectors_in_tests(struct engine *engine);

View file

@ -1,7 +1,6 @@
<!--
---
title: "Health configuration reference"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/health/REFERENCE.md
---
-->
@ -137,7 +136,7 @@ If you create a template using the `disk.io` context, it will apply an alarm to
The alarm or template will be used only if the operating system of the host matches this list specified in `os`. The
value is a space-separated list.
The following example enables the entity on Linux, FreeBSD, and macOS, but no other operating systems.
The following example enables the entity on Linux, FreeBSD, and macOS, but no other operating systems.
```yaml
os: linux freebsd macos

View file

@ -35,7 +35,6 @@
# - Microsoft Team notification by @tioumen
# - RocketChat notifications by @Hermsi1337 #3777
# - Google Hangouts Chat notifications by @EnzoAkira and @hendrikhofstadt
# - Dynatrace Event by @illumine
# -----------------------------------------------------------------------------
# testing notifications
@ -171,7 +170,6 @@ awssns
rocketchat
sms
hangouts
dynatrace
"
# -----------------------------------------------------------------------------
@ -368,16 +366,6 @@ IRC_NETWORK=
# hangouts configs
declare -A HANGOUTS_WEBHOOK_URI
# dynatrace configs
DYNATRACE_SPACE=
DYNATRACE_SERVER=
DYNATRACE_TOKEN=
DYNATRACE_TAG_VALUE=
DYNATRACE_ANNOTATION_TYPE=
DYNATRACE_EVENT=
SEND_DYNATRACE=
# load the stock and user configuration files
# these will overwrite the variables above
@ -515,14 +503,6 @@ filter_recipient_by_criticality() {
#shellcheck disable=SC2153
{ [ -z "${FLEEP_SERVER}" ] || [ -z "${FLEEP_SENDER}" ]; } && SEND_FLEEP="NO"
# check dynatrace
{ [ -z "${DYNATRACE_SPACE}" ] ||
[ -z "${DYNATRACE_SERVER}" ] ||
[ -z "${DYNATRACE_TOKEN}" ] ||
[ -z "${DYNATRACE_TAG_VALUE}" ] ||
[ -z "${DYNATRACE_EVENT}" ]; } && SEND_DYNATRACE="NO"
if [ "${SEND_PUSHOVER}" = "YES" ] ||
[ "${SEND_SLACK}" = "YES" ] ||
[ "${SEND_ROCKETCHAT}" = "YES" ] ||
@ -541,8 +521,7 @@ if [ "${SEND_PUSHOVER}" = "YES" ] ||
[ "${SEND_PROWL}" = "YES" ] ||
[ "${SEND_HANGOUTS}" = "YES" ] ||
[ "${SEND_CUSTOM}" = "YES" ] ||
[ "${SEND_MSTEAM}" = "YES" ] ||
[ "${SEND_DYNATRACE}" = "YES" ]; then
[ "${SEND_MSTEAM}" = "YES" ]; then
# if we need curl, check for the curl command
if [ -z "${curl}" ]; then
curl="$(command -v curl 2>/dev/null)"
@ -568,7 +547,6 @@ if [ "${SEND_PUSHOVER}" = "YES" ] ||
SEND_PROWL="NO"
SEND_HANGOUTS="NO"
SEND_CUSTOM="NO"
SEND_DYNATRACE="NO"
fi
fi
@ -695,9 +673,7 @@ for method in "${SEND_EMAIL}" \
"${SEND_AWSSNS}" \
"${SEND_SYSLOG}" \
"${SEND_SMS}" \
"${SEND_MSTEAM}" \
"${SEND_DYNATRACE}"; do
"${SEND_MSTEAM}"; do
if [ "${method}" == "YES" ]; then
proceed=1
break
@ -1911,53 +1887,6 @@ EOF
return 1
}
# -----------------------------------------------------------------------------
# Dynatrace sender
send_dynatrace() {
[ "${SEND_DYNATRACE}" != "YES" ] && return 1
local dynatrace_url="${DYNATRACE_SERVER}/e/${DYNATRACE_SPACE}/api/v1/events"
local description="NetData Notification for: ${host} ${chart}.${name} is ${status}"
local payload=""
payload=$(cat <<EOF
{
"title": "NetData Alarm from ${host}",
"source" : "${DYNATRACE_ANNOTATION_TYPE}",
"description" : "${description}",
"eventType": "${DYNATRACE_EVENT}",
"attachRules":{
"tagRule":[{
"meTypes":["HOST"],
"tags":["${DYNATRACE_TAG_VALUE}"]
}]
},
"customProperties":{
"description": "${description}"
}
}
EOF
)
# echo ${payload}
httpcode=$(docurl -X POST -H "Authorization: Api-token ${DYNATRACE_TOKEN}" -H "Content-Type: application/json" -d "${payload}" ${dynatrace_url})
ret=$?
if [ ${ret} -eq 0 ]; then
if [ "${httpcode}" = "200" ]; then
info "sent ${DYNATRACE_EVENT} to ${DYNATRACE_SERVER}"
return 0
else
warning "Dynatrace ${DYNATRACE_SERVER} responded ${httpcode} notification for: ${host} ${chart}.${name} is ${status} was not sent!"
return 1
fi
else
error "failed to sent ${DYNATRACE_EVENT} notification for: ${host} ${chart}.${name} is ${status} to ${DYNATRACE_SERVER} with error code ${ret}."
return 1
fi
}
# -----------------------------------------------------------------------------
# prepare the content of the notification
@ -2462,12 +2391,6 @@ fi
SENT_EMAIL=$?
# -----------------------------------------------------------------------------
# send the EVENT to Dynatrace
send_dynatrace "${host}" "${chart}" "${name}" "${status}"
SENT_DYNATRACE=$?
# -----------------------------------------------------------------------------
# let netdata know
for state in "${SENT_EMAIL}" \
@ -2493,8 +2416,7 @@ for state in "${SENT_EMAIL}" \
"${SENT_AWSSNS}" \
"${SENT_SYSLOG}" \
"${SENT_SMS}" \
"${SENT_MSTEAM}" \
"${SENT_DYNATRACE}"; do
"${SENT_MSTEAM}"; do
if [ "${state}" -eq 0 ]; then
# we sent something
exit 0

View file

@ -1,12 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# THIS IS NOT A COMPLETE Makefile
# IT IS INCLUDED BY ITS PARENT'S Makefile.am
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
# install these files
dist_noinst_DATA += \
dynatrace/README.md \
dynatrace/Makefile.inc \
$(NULL)

View file

@ -1,36 +0,0 @@
<!--
---
title: "Dynatrace"
custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/dynatrace/README.md
---
-->
# Dynatrace
Dynatrace allows you to receive notifications using their Events REST API.
See [the Dynatrace documentation](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/environment-api/events/post-event/) about POSTing an event in the Events API for more details.
You need:
1. Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts.
The Dynatrace server should be with protocol prefixed (`http://` or `https://`). For example: `https://monitor.example.com`
This is a required parameter.
2. API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API.
Generate a Dynatrace API authentication token. On your Dynatrace server, go to **Settings** --> **Integration** --> **Dynatrace API** --> **Generate token**.
See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.
This is a required parameter.
3. API Space. This is the URL part of the page you have access in order to generate the API Token. For example, for my generated API Token the URL is:
https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all
In that case, my space is _2a93fe0e-4cd5-469a-9d0d-1a064235cfce_
This is a required parameter.
4. Generate a Server Tag. On your Dynatrace Server, go to **Settings** --> **Tags** --> **Manually applied tags** and create the Tag.
The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.
This is a required parameter.
5. Specify the Dynatrace event. This can be one of `CUSTOM_INFO`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, and `CUSTOM_DEPLOYMENT`.
The default value is `CUSTOM_INFO`.
This is a required parameter.
6. Specify the annotation type. This is the source of the Dynatrace event. Put whatever it fits you, for example,
_Netdata Alarm_, which is also the default value.

View file

@ -229,43 +229,6 @@ DEFAULT_RECIPIENT_EMAIL="root"
# to not send HTML but Plain Text only emails.
#EMAIL_PLAINTEXT_ONLY="YES"
#------------------------------------------------------------------------------
# Dynatrace global notification options
#------------------------------------------------------------------------------
# enable/disable sending Dynatrace notifications
SEND_DYNATRACE="YES"
# The Dynatrace server with protocol prefix (http:// or https://), example https://monitor.illumineit.com
# Required
DYNATRACE_SERVER=""
# Generate a Dynatrace API authentication token
# Read https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/
# On Dynatrace server goto Settings --> Integration --> Dynatrace API --> Generate token
# Required
DYNATRACE_TOKEN=""
# Beware: Space is taken from dynatrace URL from browser when you create the TOKEN
# Required
DYNATRACE_SPACE=""
# Generate a Server Tag. On the Dynatrace Server go to Settings --> Tags --> Manually applied tags create the Tag
# The NetData alarm will be sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag
# you created.
# Required
DYNATRACE_TAG_VALUE=""
# Change this to what you want
DYNATRACE_ANNOTATION_TYPE="NetData Alarm"
# This can be CUSTOM_INFO, CUSTOM_ANNOTATION, CUSTOM_CONFIGURATION, CUSTOM_DEPLOYMENT
# Applying default value
# Required
DYNATRACE_EVENT="CUSTOM_INFO"
DEFAULT_RECIPIENT_DYNATRACE=""
#------------------------------------------------------------------------------
# hangouts (google hangouts chat) global notification options
@ -910,7 +873,6 @@ role_recipients_msteam[sysadmin]="${DEFAULT_RECIPIENT_MSTEAM}"
role_recipients_rocketchat[sysadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
role_recipients_dynatrace[sysadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
# -----------------------------------------------------------------------------
# DNS related alarms
@ -960,7 +922,6 @@ role_recipients_rocketchat[domainadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
role_recipients_sms[domainadmin]="${DEFAULT_RECIPIENT_SMS}"
role_recipients_dynatrace[domainadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
# -----------------------------------------------------------------------------
# database servers alarms
# mysql, redis, memcached, postgres, etc
@ -1011,7 +972,6 @@ role_recipients_rocketchat[dba]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
role_recipients_sms[dba]="${DEFAULT_RECIPIENT_SMS}"
role_recipients_dynatrace[dba]="${DEFAULT_RECIPIENT_DYNATRACE}"
# -----------------------------------------------------------------------------
# web servers alarms
# apache, nginx, lighttpd, etc
@ -1062,7 +1022,6 @@ role_recipients_rocketchat[webmaster]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
role_recipients_sms[webmaster]="${DEFAULT_RECIPIENT_SMS}"
role_recipients_dynatrace[webmaster]="${DEFAULT_RECIPIENT_DYNATRACE}"
# -----------------------------------------------------------------------------
# proxy servers alarms
# squid, etc
@ -1113,7 +1072,7 @@ role_recipients_rocketchat[proxyadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
role_recipients_sms[proxyadmin]="${DEFAULT_RECIPIENT_SMS}"
role_recipients_dynatrace[proxyadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
# -----------------------------------------------------------------------------
# peripheral devices
# UPS, photovoltaics, etc
@ -1162,4 +1121,3 @@ role_recipients_rocketchat[sitemgr]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
role_recipients_sms[sitemgr]="${DEFAULT_RECIPIENT_SMS}"
role_recipients_dynatrace[sitemgr]="${DEFAULT_RECIPIENT_DYNATRACE}"

View file

@ -547,8 +547,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons
s++;
if (is_exporter_config) {
global_exporting_section =
!(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS));
global_exporting_section = !(strcmp(s, CONFIG_SECTION_EXPORTING));
if (unlikely(!global_exporting_section)) {
int rc;
rc = is_valid_connector(s, 0);

View file

@ -82,19 +82,18 @@
#define CONFIG_FILENAME "netdata.conf"
#define CONFIG_SECTION_GLOBAL "global"
#define CONFIG_SECTION_WEB "web"
#define CONFIG_SECTION_STATSD "statsd"
#define CONFIG_SECTION_PLUGINS "plugins"
#define CONFIG_SECTION_CLOUD "cloud"
#define CONFIG_SECTION_REGISTRY "registry"
#define CONFIG_SECTION_HEALTH "health"
#define CONFIG_SECTION_BACKEND "backend"
#define CONFIG_SECTION_STREAM "stream"
#define CONFIG_SECTION_EXPORTING "exporting:global"
#define CONFIG_SECTION_PROMETHEUS "prometheus:exporter"
#define CONFIG_SECTION_HOST_LABEL "host labels"
#define EXPORTING_CONF "exporting.conf"
#define CONFIG_SECTION_GLOBAL "global"
#define CONFIG_SECTION_WEB "web"
#define CONFIG_SECTION_STATSD "statsd"
#define CONFIG_SECTION_PLUGINS "plugins"
#define CONFIG_SECTION_CLOUD "cloud"
#define CONFIG_SECTION_REGISTRY "registry"
#define CONFIG_SECTION_HEALTH "health"
#define CONFIG_SECTION_BACKEND "backend"
#define CONFIG_SECTION_STREAM "stream"
#define CONFIG_SECTION_EXPORTING "exporting:global"
#define CONFIG_SECTION_HOST_LABEL "host labels"
#define EXPORTING_CONF "exporting.conf"
// these are used to limit the configuration names and values lengths
// they are not enforced by config.c functions (they will strdup() all strings, no matter of their length)

View file

@ -192,7 +192,7 @@ get_os_release() {
eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" "${os_release_file}")"
for x in "${ID}" ${ID_LIKE}; do
case "${x,,}" in
alpine | arch | centos | clear-linux-os | debian | fedora | gentoo | manjaro | opensuse-leap | rhel | sabayon | sles | suse | ubuntu)
alpine | arch | centos | debian | fedora | gentoo | sabayon | rhel | ubuntu | suse | opensuse-leap | sles | clear-linux-os)
distribution="${x}"
version="${VERSION_ID}"
codename="${VERSION}"
@ -1047,9 +1047,7 @@ declare -A pkg_zip=(
)
validate_package_trees() {
if type -t validate_tree_${tree} > /dev/null; then
validate_tree_${tree}
fi
validate_tree_${tree}
}
validate_installed_package() {

View file

@ -75,7 +75,7 @@ sudo ./netdata-installer.sh --install /usr/local
> Your Netdata configuration directory will be at `/usr/local/netdata/`, and your stock configuration directory will
> be at **`/usr/local/lib/netdata/conf.d/`.**
>
> The installer will also install a startup plist to start Netdata when your macOS system boots.
> The installer will also install a startup plist to start Netdata when your Mac boots.
## What's next?

View file

@ -1,7 +1,6 @@
<!--
---
title: "Package Maintainers"
date: 2020-03-31
custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/maintainers/README.md
---
-->
@ -37,11 +36,11 @@ This page tracks the package maintainers for Netdata, for various operating syst
---
## macOS
## MacOS
| System | URL | Core Developer | Package Maintainer
|:-:|:-:|:-:|:-:|
| macOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
| MacOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
---