* split rrdfunctions streaming and progress

* simplified internal inline functions API

* split rrdfunctions inflight management

* split rrd functions exporters

* renames

* base dyncfg structure

* config pluginsd

* intercept dyncfg function calls

* loading and saving of dyncfg metadata and data

* save metadata and payload to a single file; added code to update the plugins with jobs and saved configs

* basic working unit test

* added payload to functions execution

* removed old dyncfg code that is not needed any more

* more cleanup

* cleanup sender for functions with payload

* dyncfg functions are not exposed as functions

* remaining work to avoid indexing the \0 terminating character in dictionary keys

* added back old dyncfg plugins.d commands as noop, to allow plugins to continue working

* working api; working streaming;

* updated plugins.d documentation

* aclk and http api requests share the same header parsing logic

* added source type internal

* fixed crashes

* added god mode for tests

* fixes

* fixed messages

* save host machine guids to configs

* cleaner manipulation of supported commands

* the functions event loop for external plugins can now process dyncfg requests

* unified internal and external plugins dyncfg API

* Netdata serves schema requests from /etc/netdata/schema.d and /var/lib/netdata/conf.d/schema.d

* cleanup and various fixes; fixed bug in previous dyncfg implementation on streaming that was sending the payload in a way that allowed other streaming commands to be multiplexed

* internals go to a separate header file

* fix duplicate ACLK requests sent by aclk queue mechanism

* use fstat instead of stat

* working api

* plugin actions renamed to create and delete; dyncfg files are removed only from user actions

* prevent deadlock by using the react callback

* fix for string_strndupz()

* better dyncfg unittests

* more tests at the unittests

* properly detect dyncfg functions

* hide config functions from the UI

* tree response improvements

* send the initial update with payload

* determine tty using stdout, not stderr

* changes to statuses, cleanup and the code to bring all business logic into interception

* do not crash when the status is empty

* functions now propagate the source of the requests to plugins

* avoid warning about unused functions

* in the count of items for attention, do not count the orphan entries

* save source into dyncfg

* make the list null terminated

* fixed invalid comparison

* prevent memory leak on duplicated headers; log x-forwarded-for

* more unit tests

* added dyncfg unittests into the default unittests

* more unit tests and fixes

* more unit tests and fixes

* fix dictionary unittests

* config functions require admin access
Costa Tsaousis 2024-01-11 16:56:45 +02:00 committed by GitHub
parent bead543ea5
commit f2b250a1f5
121 changed files with 6593 additions and 6060 deletions
.codacy.yml
CMakeLists.txt
aclk
collectors
daemon
database
libnetdata

View file

@ -21,5 +21,4 @@ exclude_paths:
- web/server/h2o/libh2o/**
- build/**
- build_external/**
- libnetdata/dyn_conf/tests/**
- packaging/**

View file

@ -144,7 +144,7 @@ if(NOT ${DISABLE_HARDENING})
endif()
if(NOT ${CMAKE_C_FLAGS} MATCHES "stack-clash-protection")
check_c_compiler_flag("-fstack-clash-protection", HAVE_STACK_CLASH_FLAG)
check_c_compiler_flag("-fstack-clash-protection" HAVE_STACK_CLASH_FLAG)
if(HAVE_STACK_CLASH_FLAG)
set(EXTRA_HARDENING_FLAGS "${EXTRA_HARDENING_FLAGS} -fstack-clash-protection")
endif()
@ -641,8 +641,10 @@ set(LIBNETDATA_FILES
libnetdata/http/http_access.h
libnetdata/http/http_defs.c
libnetdata/http/http_defs.h
libnetdata/dyn_conf/dyn_conf.c
libnetdata/dyn_conf/dyn_conf.h
libnetdata/http/content_type.c
libnetdata/http/content_type.h
libnetdata/config/dyncfg.c
libnetdata/config/dyncfg.h
)
if(ENABLE_PLUGIN_EBPF)
@ -760,6 +762,15 @@ set(DAEMON_FILES
daemon/pipename.h
daemon/unit_test.c
daemon/unit_test.h
daemon/config/dyncfg.c
daemon/config/dyncfg.h
daemon/config/dyncfg-files.c
daemon/config/dyncfg-unittest.c
daemon/config/dyncfg-inline.c
daemon/config/dyncfg-echo.c
daemon/config/dyncfg-internals.h
daemon/config/dyncfg-intercept.c
daemon/config/dyncfg-tree.c
)
set(H2O_FILES
@ -784,6 +795,10 @@ set(API_PLUGIN_FILES
web/api/web_api_v1.h
web/api/web_api_v2.c
web/api/web_api_v2.h
web/api/http_auth.c
web/api/http_auth.h
web/api/http_header.c
web/api/http_header.h
web/api/badges/web_buffer_svg.c
web/api/badges/web_buffer_svg.h
web/api/exporters/allmetrics.c
@ -929,6 +944,12 @@ set(RRD_PLUGIN_FILES
database/rrdfamily.c
database/rrdfunctions.c
database/rrdfunctions.h
database/rrdfunctions-inline.c
database/rrdfunctions-inline.h
database/rrdfunctions-progress.c
database/rrdfunctions-progress.h
database/rrdfunctions-streaming.c
database/rrdfunctions-streaming.h
database/rrdhost.c
database/rrdlabels.c
database/rrd.c
@ -965,6 +986,12 @@ set(RRD_PLUGIN_FILES
database/sqlite/dbdata.c
database/KolmogorovSmirnovDist.c
database/KolmogorovSmirnovDist.h
database/rrdfunctions-inflight.c
database/rrdfunctions-inflight.h
database/rrdfunctions-exporters.c
database/rrdfunctions-exporters.h
database/rrdfunctions-internals.h
database/rrdcollector-internals.h
)
if(ENABLE_DBENGINE)
@ -1022,6 +1049,7 @@ set(SYSTEMD_JOURNAL_PLUGIN_FILES
collectors/systemd-journal.plugin/systemd-journal-files.c
collectors/systemd-journal.plugin/systemd-journal-fstat.c
collectors/systemd-journal.plugin/systemd-journal-watcher.c
collectors/systemd-journal.plugin/systemd-journal-dyncfg.c
)
set(STREAMING_PLUGIN_FILES

View file

@ -99,30 +99,49 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
BUFFER *local_buffer = NULL;
size_t size = 0;
size_t sent = 0;
usec_t dt_ut = 0;
int z_ret;
BUFFER *z_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE, &netdata_buffers_statistics.buffers_aclk);
char *start, *end;
struct web_client *w = web_client_get_from_cache();
web_client_set_conn_cloud(w);
w->acl = HTTP_ACL_ACLK;
w->access = HTTP_ACCESS_MEMBER; // the minimum access level for all requests from netdata cloud
web_client_flags_clear_auth(w);
web_client_flag_set(w, WEB_CLIENT_FLAG_AUTH_CLOUD);
w->mode = HTTP_REQUEST_MODE_GET;
w->timings.tv_in = query->created_tv;
w->interrupt.callback = aclk_web_client_interrupt_cb;
w->interrupt.callback_data = pending_req_list_add(query->msg_id);
usec_t t;
buffer_flush(w->response.data);
buffer_strcat(w->response.data, query->data.http_api_v2.payload);
HTTP_VALIDATION validation = http_request_validate(w);
if(validation != HTTP_VALIDATION_OK) {
nd_log(NDLS_ACCESS, NDLP_ERR, "ACLK received request is not valid, code %d", validation);
retval = 1;
w->response.code = HTTP_RESP_BAD_REQUEST;
w->response.code = (short)aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id,
dt_ut, query->created, w->response.code,
NULL, 0);
goto cleanup;
}
web_client_timeout_checkpoint_set(w, query->timeout);
if(web_client_timeout_checkpoint_and_check(w, &t)) {
nd_log(NDLS_ACCESS, NDLP_ERR, "QUERY CANCELED: QUEUE TIME EXCEEDED %llu ms (LIMIT %d ms)", t / USEC_PER_MS, query->timeout);
if(web_client_timeout_checkpoint_and_check(w, &dt_ut)) {
nd_log(NDLS_ACCESS, NDLP_ERR,
"QUERY CANCELED: QUEUE TIME EXCEEDED %llu ms (LIMIT %d ms)",
dt_ut / USEC_PER_MS, query->timeout);
retval = 1;
w->response.code = HTTP_RESP_SERVICE_UNAVAILABLE;
aclk_http_msg_v2_err(query_thr->client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, NULL, 0);
goto cleanup;
}
web_client_decode_path_and_query_string(w, query->data.http_api_v2.query);
char *path = (char *)buffer_tostring(w->url_path_decoded);
if (aclk_stats_enabled) {
@ -134,41 +153,24 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
}
w->response.code = (short)web_client_api_request_with_node_selection(localhost, w, path);
web_client_timeout_checkpoint_response_ready(w, &t);
web_client_timeout_checkpoint_response_ready(w, &dt_ut);
if (aclk_stats_enabled) {
ACLK_STATS_LOCK;
aclk_metrics_per_sample.cloud_q_process_total += t;
aclk_metrics_per_sample.cloud_q_process_total += dt_ut;
aclk_metrics_per_sample.cloud_q_process_count++;
if (aclk_metrics_per_sample.cloud_q_process_max < t)
aclk_metrics_per_sample.cloud_q_process_max = t;
if (aclk_metrics_per_sample.cloud_q_process_max < dt_ut)
aclk_metrics_per_sample.cloud_q_process_max = dt_ut;
ACLK_STATS_UNLOCK;
}
size = w->response.data->len;
sent = size;
// check if gzip encoding can and should be used
if ((start = strstr((char *)query->data.http_api_v2.payload, WEB_HDR_ACCEPT_ENC))) {
start += strlen(WEB_HDR_ACCEPT_ENC);
end = strstr(start, "\x0D\x0A");
start = strstr(start, "gzip");
if (start && start < end) {
w->response.zstream.zalloc = Z_NULL;
w->response.zstream.zfree = Z_NULL;
w->response.zstream.opaque = Z_NULL;
if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + 16, 8, web_gzip_strategy) == Z_OK) {
w->response.zinitialized = true;
w->response.zoutput = true;
} else
netdata_log_error("Failed to initialize zlib. Proceeding without compression.");
}
}
if (w->response.data->len && w->response.zinitialized) {
w->response.zstream.next_in = (Bytef *)w->response.data->buffer;
w->response.zstream.avail_in = w->response.data->len;
do {
w->response.zstream.avail_out = NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE;
w->response.zstream.next_out = w->response.zbuffer;
@ -188,6 +190,7 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
memcpy(&z_buffer->buffer[z_buffer->len], w->response.zbuffer, bytes_to_cpy);
z_buffer->len += bytes_to_cpy;
} while(z_ret != Z_STREAM_END);
// so that web_client_build_http_header
// puts correct content length into header
buffer_free(w->response.data);
@ -213,7 +216,9 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
}
// send msg.
w->response.code = aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, t, query->created, w->response.code, local_buffer->buffer, local_buffer->len);
w->response.code = (short)aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id,
dt_ut, query->created, w->response.code,
local_buffer->buffer, local_buffer->len);
cleanup:
web_client_log_completed_request(w, false);

View file

@ -10,11 +10,9 @@ static netdata_mutex_t aclk_query_queue_mutex = NETDATA_MUTEX_INITIALIZER;
static struct aclk_query_queue {
aclk_query_t head;
aclk_query_t tail;
int block_push;
} aclk_query_queue = {
.head = NULL,
.tail = NULL,
.block_push = 0
};
@ -31,15 +29,7 @@ static inline int _aclk_queue_query(aclk_query_t query)
aclk_query_free(query);
return 1;
}
if (!aclk_query_queue.head) {
aclk_query_queue.head = query;
aclk_query_queue.tail = query;
ACLK_QUEUE_UNLOCK;
return 0;
}
// TODO deduplication
aclk_query_queue.tail->next = query;
aclk_query_queue.tail = query;
DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(aclk_query_queue.head, query, prev, next);
ACLK_QUEUE_UNLOCK;
return 0;
@ -77,9 +67,7 @@ aclk_query_t aclk_queue_pop(void)
return ret;
}
aclk_query_queue.head = ret->next;
if (unlikely(!aclk_query_queue.head))
aclk_query_queue.tail = aclk_query_queue.head;
DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(aclk_query_queue.head, ret, prev, next);
ACLK_QUEUE_UNLOCK;
ret->next = NULL;

View file

@ -55,7 +55,7 @@ struct aclk_query {
struct timeval created_tv;
usec_t created;
int timeout;
aclk_query_t next;
aclk_query_t prev, next;
// TODO maybe remove?
int version;

View file

@ -101,7 +101,7 @@ static inline int aclk_v2_payload_get_query(const char *payload, char **query_ur
const char *start, *end;
// TODO better check of URL
if(strncmp(payload, ACLK_CLOUD_REQ_V2_PREFIX, strlen(ACLK_CLOUD_REQ_V2_PREFIX))) {
if(strncmp(payload, ACLK_CLOUD_REQ_V2_PREFIX, strlen(ACLK_CLOUD_REQ_V2_PREFIX)) != 0) {
errno = 0;
netdata_log_error("Only accepting requests that start with \"%s\" from CLOUD.", ACLK_CLOUD_REQ_V2_PREFIX);
return 1;
@ -196,7 +196,7 @@ int aclk_handle_cloud_cmd_message(char *payload)
// Originally we were expecting to have multiple types of 'cmd' message,
// but after the new protocol was designed we will ever only have 'http'
if (strcmp(cloud_to_agent.type_id, "http")) {
if (strcmp(cloud_to_agent.type_id, "http") != 0) {
error_report("Only 'http' cmd message is supported");
goto err_cleanup;
}

View file

@ -4397,7 +4397,9 @@ static void apps_plugin_function_processes_help(const char *transaction) {
buffer_json_add_array_item_double(wb, _tmp); \
} while(0)
static void function_processes(const char *transaction, char *function __maybe_unused, usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused) {
static void function_processes(const char *transaction, char *function __maybe_unused,
usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused,
BUFFER *payload __maybe_unused, const char *source __maybe_unused, void *data __maybe_unused) {
struct pid_stat *p;
char *words[PLUGINSD_MAX_WORDS] = { NULL };
@ -4459,8 +4461,8 @@ static void function_processes(const char *transaction, char *function __maybe_u
return;
}
else {
char msg[PLUGINSD_LINE_MAX];
snprintfz(msg, PLUGINSD_LINE_MAX, "Invalid parameter '%s'", keyword);
char msg[1024];
snprintfz(msg, sizeof(msg), "Invalid parameter '%s'", keyword);
pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, msg);
return;
}
@ -4472,7 +4474,7 @@ static void function_processes(const char *transaction, char *function __maybe_u
unsigned int memory_divisor = 1024;
unsigned int io_divisor = 1024 * RATES_DETAIL;
BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
BUFFER *wb = buffer_create(4096, NULL);
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
buffer_json_member_add_string(wb, "type", "table");
@ -5348,7 +5350,7 @@ int main(int argc, char **argv) {
struct functions_evloop_globals *wg =
functions_evloop_init(1, "APPS", &apps_and_stdout_mutex, &apps_plugin_exit);
functions_evloop_add_function(wg, "processes", function_processes, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
functions_evloop_add_function(wg, "processes", function_processes, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT, NULL);
// ------------------------------------------------------------------------

View file

@ -454,23 +454,8 @@ static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) {
#define RRDFUNCTIONS_CGTOP_HELP "View running containers"
#define RRDFUNCTIONS_SYSTEMD_SERVICES_HELP "View systemd services"
int cgroup_function_cgroup_top(uuid_t *transaction, BUFFER *wb,
usec_t *stop_monotonic_ut, const char *function, void *collector_data,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data,
rrd_function_register_progresser_cb_t register_progresser_cb,
void *register_progresser_cb_data);
int cgroup_function_systemd_top(uuid_t *transaction, BUFFER *wb,
usec_t *stop_monotonic_ut, const char *function, void *collector_data,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data,
rrd_function_register_progresser_cb_t register_progresser_cb,
void *register_progresser_cb_data);
int cgroup_function_cgroup_top(BUFFER *wb, const char *function);
int cgroup_function_systemd_top(BUFFER *wb, const char *function);
void cgroup_netdev_link_init(void);
const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg);

View file

@ -97,17 +97,7 @@ void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, NE
*sent = t->sent[slot];
}
int cgroup_function_cgroup_top(uuid_t *transaction __maybe_unused, BUFFER *wb,
usec_t *stop_monotonic_ut __maybe_unused, const char *function __maybe_unused,
void *collector_data __maybe_unused,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb __maybe_unused, void *progress_cb_data __maybe_unused,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
void *register_canceller_cb_data __maybe_unused,
rrd_function_register_progresser_cb_t register_progresser_cb __maybe_unused,
void *register_progresser_cb_data __maybe_unused) {
int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@ -334,29 +324,10 @@ int cgroup_function_cgroup_top(uuid_t *transaction __maybe_unused, BUFFER *wb,
buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
buffer_json_finalize(wb);
int response = HTTP_RESP_OK;
if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
buffer_flush(wb);
response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
}
if(result_cb)
result_cb(wb, response, result_cb_data);
return response;
return HTTP_RESP_OK;
}
int cgroup_function_systemd_top(uuid_t *transaction __maybe_unused, BUFFER *wb,
usec_t *stop_monotonic_ut __maybe_unused, const char *function __maybe_unused,
void *collector_data __maybe_unused,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb __maybe_unused, void *progress_cb_data __maybe_unused,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
void *register_canceller_cb_data __maybe_unused,
rrd_function_register_progresser_cb_t register_progresser_cb __maybe_unused,
void *register_progresser_cb_data __maybe_unused) {
int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@ -514,14 +485,5 @@ int cgroup_function_systemd_top(uuid_t *transaction __maybe_unused, BUFFER *wb,
buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
buffer_json_finalize(wb);
int response = HTTP_RESP_OK;
if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
buffer_flush(wb);
response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
}
if(result_cb)
result_cb(wb, response, result_cb_data);
return response;
return HTTP_RESP_OK;
}

View file

@ -1671,16 +1671,15 @@ void *cgroups_main(void *ptr) {
// we register this only on localhost
// for the other nodes, the origin server should register it
rrd_collector_started(); // this creates a collector that runs for as long as netdata runs
cgroup_netdev_link_init();
rrd_function_add(localhost, NULL, "containers-vms", 10, RRDFUNCTIONS_PRIORITY_DEFAULT / 2,
RRDFUNCTIONS_CGTOP_HELP, "top", HTTP_ACCESS_ANY,
true, cgroup_function_cgroup_top, NULL);
rrd_function_add_inline(localhost, NULL, "containers-vms", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT / 2, RRDFUNCTIONS_CGTOP_HELP,
"top", HTTP_ACCESS_ANY, cgroup_function_cgroup_top);
rrd_function_add(localhost, NULL, "systemd-services", 10, RRDFUNCTIONS_PRIORITY_DEFAULT / 3,
RRDFUNCTIONS_SYSTEMD_SERVICES_HELP, "top", HTTP_ACCESS_ANY,
true, cgroup_function_systemd_top, NULL);
rrd_function_add_inline(localhost, NULL, "systemd-services", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT / 3, RRDFUNCTIONS_SYSTEMD_SERVICES_HELP,
"top", HTTP_ACCESS_ANY, cgroup_function_systemd_top);
heartbeat_t hb;
heartbeat_init(&hb);

View file

@ -636,17 +636,7 @@ static void diskspace_main_cleanup(void *ptr) {
#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3
#endif
int diskspace_function_mount_points(uuid_t *transaction __maybe_unused, BUFFER *wb,
usec_t *stop_monotonic_ut __maybe_unused, const char *function __maybe_unused,
void *collector_data __maybe_unused,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb __maybe_unused, void *progress_cb_data __maybe_unused,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
void *register_canceller_cb_data __maybe_unused,
rrd_function_register_progresser_cb_t register_progresser_cb __maybe_unused,
void *register_progresser_cb_data __maybe_unused) {
int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@ -850,16 +840,7 @@ int diskspace_function_mount_points(uuid_t *transaction __maybe_unused, BUFFER *
buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
buffer_json_finalize(wb);
int response = HTTP_RESP_OK;
if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
buffer_flush(wb);
response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
}
if(result_cb)
result_cb(wb, response, result_cb_data);
return response;
return HTTP_RESP_OK;
}
void *diskspace_main(void *ptr) {
@ -868,10 +849,9 @@ void *diskspace_main(void *ptr) {
worker_register_job_name(WORKER_JOB_MOUNTPOINT, "mountpoint");
worker_register_job_name(WORKER_JOB_CLEANUP, "cleanup");
rrd_collector_started();
rrd_function_add(localhost, NULL, "mount-points", 10, RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSPACE_HELP,
"top", HTTP_ACCESS_ANY,
true, diskspace_function_mount_points, NULL);
rrd_function_add_inline(localhost, NULL, "mount-points", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSPACE_HELP,
"top", HTTP_ACCESS_ANY, diskspace_function_mount_points);
netdata_thread_cleanup_push(diskspace_main_cleanup, ptr);

View file

@ -277,7 +277,10 @@ void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em)
static void ebpf_function_socket_manipulation(const char *transaction,
char *function __maybe_unused,
usec_t *stop_monotonic_ut __maybe_unused,
bool *cancelled __maybe_unused)
bool *cancelled __maybe_unused,
BUFFER *payload __maybe_unused,
const char *source __maybe_unused,
void *data __maybe_unused)
{
ebpf_module_t *em = &ebpf_modules[EBPF_MODULE_SOCKET_IDX];
@ -434,7 +437,7 @@ for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) {
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
BUFFER *wb = buffer_create(4096, NULL);
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
buffer_json_member_add_string(wb, "type", "table");
@ -684,7 +687,7 @@ void *ebpf_function_thread(void *ptr)
&ebpf_plugin_exit);
functions_evloop_add_function(
wg, EBPF_FUNCTION_SOCKET, ebpf_function_socket_manipulation, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
wg, EBPF_FUNCTION_SOCKET, ebpf_function_socket_manipulation, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT, NULL);
pthread_mutex_lock(&lock);
int i;

View file

@ -1471,10 +1471,12 @@ static const char *get_sensor_function_priority(struct sensor *sn) {
}
}
static void freeimi_function_sensors(const char *transaction, char *function __maybe_unused, usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused) {
static void freeimi_function_sensors(const char *transaction, char *function __maybe_unused,
usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused,
BUFFER *payload __maybe_unused, const char *source __maybe_unused, void *data __maybe_unused) {
time_t expires = now_realtime_sec() + update_every;
BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
BUFFER *wb = buffer_create(4096, NULL);
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
buffer_json_member_add_string(wb, "type", "table");
@ -1973,7 +1975,7 @@ int main (int argc, char **argv) {
size_t iteration = 0;
usec_t step = 100 * USEC_PER_MS;
bool global_chart_created = false;
bool tty = isatty(fileno(stderr)) == 1;
bool tty = isatty(fileno(stdout)) == 1;
heartbeat_t hb;
heartbeat_init(&hb);
@ -2045,7 +2047,7 @@ int main (int argc, char **argv) {
struct functions_evloop_globals *wg =
functions_evloop_init(1, "FREEIPMI", &stdout_mutex, &function_plugin_should_exit);
functions_evloop_add_function(
wg, "ipmi-sensors", freeimi_function_sensors, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
wg, "ipmi-sensors", freeimi_function_sensors, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT, NULL);
FREEIPMI_GLOBAL_FUNCTION_SENSORS();
}

View file

@ -136,7 +136,8 @@ Netdata parses lines starting with:
- `FUNCTION` - define functions
- `FUNCTION_PROGRESS` - report the progress of a function execution
- `FUNCTION_RESULT_BEGIN` - to initiate the transmission of function results
- `FUNCTION_RESULT_END` - to end the transmission of function results
- `FUNCTION_RESULT_END` - to end the transmission of function result
- `CONFIG` - to define dynamic configuration entities
a single program can produce any number of charts with any number of dimensions each.
@ -147,7 +148,8 @@ Netdata may send the following commands to the plugin's `stdin`:
- `FUNCTION` - to call a specific function, with all parameters inline
- `FUNCTION_PAYLOAD` - to call a specific function, with a payload of parameters
- `FUNCTION_PAYLOAD_END` - to end the payload of parameters
- `FUNCTION_CANCEL` - cancel a running function transaction
- `FUNCTION_CANCEL` - to cancel a running function transaction - no response is required
- `FUNCTION_PROGRESS` - to report that a user asked the progress of running function call - no response is required
### Command line parameters
@ -471,43 +473,41 @@ The plugin can register functions to Netdata, like this:
> FUNCTION [GLOBAL] "name and parameters of the function" timeout "help string for users" "tags" "access"
- Tags currently recognized are either `top` or `logs` (or both, space separated).
- Access is one of `any`, `members`, or `admins`.
- Access is one of `any`, `member`, or `admin`:
- `any` to offer the function to all users of Netdata, even if they are not authenticated.
- `member` to offer the function to all authenticated members of Netdata.
- `admin` to offer the function only to authenticated administrators.
A function can be used by users to ask for more information from the collector. Netdata maintains a registry of functions in 2 levels:
- per node
- per chart
Both node and chart functions are exactly the same, but chart functions allow Netdata to relate functions with charts and therefore present a context sensitive menu of functions related to the chart the user is using.
Both node and chart functions are exactly the same, but chart functions allow Netdata to relate functions with charts and therefore present a context-sensitive menu of functions related to the chart the user is using.
A function is identified by a string. The allowed characters in the function definition are:
Users can get a list of all the registered functions using the `/api/v1/functions` endpoint of Netdata and call functions using the `/api/v1/function` API call of Netdata.
| Character | Symbol | In Functions |
|-------------------|:------:|:------------:|
| UTF-8 character | UTF-8 | keep |
| Lower case letter | [a-z] | keep |
| Upper case letter | [A-Z] | keep |
| Digit | [0-9] | keep |
| Underscore | _ | keep |
| Comma | , | keep |
| Minus | - | keep |
| Period | . | keep |
| Colon | : | keep |
| Slash | / | keep |
| Space | ' ' | keep |
| Semicolon | ; | : |
| Equal | = | : |
| Backslash | \ | / |
| Anything else | | _ |
Uses can get a list of all the registered functions using the `/api/v1/functions` end point of Netdata.
Users can call functions using the `/api/v1/function` end point of Netdata.
Once a function is called, the plugin will receive at its standard input a command that looks like this:
> FUNCTION transaction_id timeout "name and parameters of the function"
> FUNCTION transaction_id timeout "name and parameters of the function as one quoted parameter" "source of request"
The plugin is expected to parse and validate `name and parameters of the function`. Netdata allows users to edit this string, append more parameters or even change the ones the plugin originally exposed. To minimize the security risk, Netdata guarantees that only the characters shown above are accepted in function definitions, but still the plugin should carefully inspect the `name and parameters of the function` to ensure that it is valid and not harmful.
When the function to be called is to receive a payload of parameters, the call looks like this:
> FUNCTION_PAYLOAD transaction_id timeout "name and parameters of the function as one quoted parameter" "source of request" "content/type"
> body of the payload, formatted according to content/type
> FUNCTION_PAYLOAD_END
In this case, Netdata will send:
- A line starting with `FUNCTION_PAYLOAD` together with the required metadata for the function, like the transaction id, the function name and its parameters, the timeout and the content type. This line ends with a newline.
- Then, the payload itself (which may or may not have newlines in it). The payload should be parsed according to the content type parameter.
- Finally, a line starting with `FUNCTION_PAYLOAD_END`, so it is expected like `\nFUNCTION_PAYLOAD_END\n`.
Note 1: The plugins.d protocol allows parameters without single or double quotes if they don't contain spaces. However, the plugin should be able to parse parameters even if they are enclosed in single or double quotes. If the first character of a parameter is a single quote, its last character should also be a single quote, and similarly for double quotes.
Note 2: Netdata always sends the function and its parameters enclosed in double quotes. If the function command and its parameters contain quotes, they are converted to single quotes.
The plugin is expected to parse and validate `name and parameters of the function as one quoted parameter`. Netdata allows the user interface to manipulate this string by appending more parameters.
If the plugin rejects the request, it should respond with this:
@ -522,12 +522,12 @@ FUNCTION_RESULT_END
If the plugin prepares a response, it should send (via its standard output, together with the collected data, but not interleaved with them):
> FUNCTION_RESULT_BEGIN transaction_id http_error_code content_type expiration
> FUNCTION_RESULT_BEGIN transaction_id http_response_code content_type expiration
Where:
- `transaction_id` is the transaction id that Netdata sent for this function execution
- `http_error` is the http error code Netdata should respond with, 200 is the "ok" response
- `http_response_code` is the http error code Netdata should respond with, 200 is the "ok" response
- `content_type` is the content type of the response
- `expiration` is the absolute timestamp (number, unix epoch) this response expires
@ -543,6 +543,158 @@ This defines the end of the message. `FUNCTION_RESULT_END` should appear in a li
After this line, Netdata resumes processing collected metrics from the plugin.
The maximum uncompressed payload size Netdata will accept is 100MB.
##### Functions cancellation
Netdata is able to detect when a user made an API request, but abandoned it before it was completed. If this happens to an API call for a function served by the plugin, Netdata will generate a `FUNCTION_CANCEL` request to let the plugin know that it can stop processing the query.
After receiving such a command, the plugin **must still send a response for the original function request**, to wake up any waiting threads before they time out. The http response code is not important, since the response will be discarded; however, for auditing reasons we suggest sending back a 499 http response code. This is not a standard response code according to the HTTP protocol, but web servers like `nginx` use it to indicate that a request was abandoned by a user.
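As an illustration, with a hypothetical transaction id `tr-1234`, the plugin could receive:
```
FUNCTION_CANCEL tr-1234
```
and should still conclude the transaction with a response that will be discarded (the message body below is illustrative only):
```
FUNCTION_RESULT_BEGIN tr-1234 499 application/json 0
{ "status": 499, "message": "request cancelled by the caller" }
FUNCTION_RESULT_END
```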
##### Functions progress
When a request takes too long to be processed, Netdata allows the plugin to report progress to Netdata, which in turn will report progress to the caller.
The plugin can send `FUNCTION_PROGRESS` like this:
> FUNCTION_PROGRESS transaction_id done all
Where:
- `transaction_id` is the transaction id of the function request
- `done` is an integer value indicating the amount of work done
- `all` is an integer value indicating the total amount of work to be done
Netdata supports two kinds of progress:
- progress as a percentage, which is calculated as `done * 100 / all`
- progress without knowing the total amount of work to be done, which is enabled when the plugin reports `all` as zero.
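For example (transaction id hypothetical), a plugin that has processed 350 of 1000 items could send:
```
FUNCTION_PROGRESS tr-1234 350 1000
```
which the caller sees as 35% progress (`350 * 100 / 1000`). If the plugin cannot estimate the total amount of work, it could send `FUNCTION_PROGRESS tr-1234 350 0` instead.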
##### Functions timeout
All function calls specify a timeout, at which all the intermediate routing nodes (parents, web server threads) will time out and abort the call.
However, all intermediate routing nodes are configured to extend the timeout when the caller asks for progress. This works like this:
When a progress request is received, if the expected timeout of the request is less than or equal to 10 seconds, the expected timeout is extended by 10 seconds.
Usually, the user interface asks for progress every second. So, during the last 10 seconds of the timeout, every progress request made shifts the timeout 10 seconds into the future.
To accomplish this, when Netdata receives a progress request from a user, it generates progress requests to the plugin, updating all the intermediate nodes to extend their timeout if necessary.
The plugin will receive progress requests like this:
> FUNCTION_PROGRESS transaction_id
There is no need to respond to this command. It is only there to let the plugin know that a user is still waiting for the query to finish.
#### CONFIG
`CONFIG` commands sent from the plugin to Netdata define dynamic configuration entities. These configurable entities are exposed to the user interface, allowing users to change configuration at runtime.
Configurations made this way are saved to disk by Netdata and replayed automatically when Netdata or the plugin restarts.
`CONFIG` commands look like this:
> CONFIG id action ...
Where:
- `id` is a unique identifier for the configurable entity. This should by design be unique across Netdata. It should be something like `plugin:module:jobs`, e.g. `go.d:postgresql:jobs:masterdb`. This is assumed to be colon-separated, with the last part (`masterdb` in our example) being the one displayed to users when there are no conflicts under the same configuration path.
- `action` can be:
- `create`, to declare the dynamic configuration entity
- `delete`, to delete the dynamic configuration entity - this does not delete the user configuration, so if an entity with the same id is created in the future, the saved configuration will be given to it.
- `status`, to update the dynamic configuration entity status
> IMPORTANT:<br/>
> The plugin should blindly create, delete and update the status of its dynamic configuration entities, without any special logic applied to them. Netdata needs to be kept up to date with what is actually happening at the plugin. Keep in mind that creating dynamic configuration entities triggers responses from Netdata, depending on their type and status. Re-creating a job triggers the same responses every time.
When the `action` is `create`, the following additional parameters are expected:
> CONFIG id action status type "path" source_type "source" "supported commands"
Where:
- `action` should be `create`
- `status` can be:
- `accepted`, the plugin accepted the configuration, but it is not running yet.
- `running`, the plugin accepted and runs the configuration.
- `failed`, the plugin tried to run the configuration, but it failed.
- `incomplete`, the plugin needs additional settings to run this configuration. This is usually used for cases where the plugin discovered a job, but important information is missing for it to work.
- `disabled`, the configuration has been disabled by a user.
- `orphan`, the configuration is not claimed by any plugin. This is used internally by Netdata to mark configuration nodes that are available but have no plugin related to them. Do not use it in plugins directly.
- `type` can be `single`, `template` or `job`:
- `single` is used when the configurable entity is fixed and users should never be able to add or delete it.
- `template` is used to define a template based on which users can add multiple configurations, like adding data collection jobs. So, the plugin defines the template of the jobs and users are presented with a `[+]` button to add such configuration jobs. The plugin can define multiple templates by giving different `id`s to them.
- `job` is used to define a job of a template. The plugin should always add all its jobs, independently of the way they have been discovered. It is important to note the relation between `template` and `job` when it comes to the `id`: the `id` of the template should be the prefix of the `job`'s `id`. For example, if the template is `go.d:postgresql:jobs`, then all its jobs should be like `go.d:postgresql:jobs:jobname`.
- `path` is the absolute path of the configurable entity inside the tree of Netdata configurations. Usually, this should be `/collectors`.
- `source_type` can be `internal`, `stock`, `user`, `discovered` or `dyncfg`:
- `internal` is used for configurations that are based on internal code settings
- `stock` is used for default configurations
- `discovered` is used for dynamic configurations the plugin discovers on its own
- `user` is used for user configurations, usually via a configuration file
- `dyncfg` is used for configuration received via this dynamic configuration mechanism
- `source` should provide more details about the exact source of the configuration, like `line@file`, or `user@ip`, etc.
- `supported_commands` is a space separated list of the following keywords, enclosed in single or double quotes. These commands are used by the user interface to determine the actions the users can take:
- `schema`, to expose the JSON schema for the user interface. This is mandatory for all configurable entities. When `schema` requests are received, Netdata will first attempt to load the schema from `/etc/netdata/schema.d/` and `/var/lib/netdata/conf.d/schema.d`. For jobs, it will serve the schema of their template. If no schema is found for the required `id`, the `schema` request will be forwarded to the plugin, which is expected to send back the relevant schema.
- `get`, to expose the current configuration values, according to the schema defined. `templates` cannot support `get`, since they don't maintain any data.
- `update`, to receive configuration updates for this entity. `templates` cannot support `update`, since they don't maintain any data.
- `test`, like `update`, but only tests the configuration and reports success or failure.
- `add`, to receive job creation commands for templates. Only `templates` should support this command.
- `remove`, to remove a configuration. Only `jobs` should support this command.
- `enable` and `disable`, to receive user requests to enable and disable this entity. If only one of `enable` or `disable` is added to the supported commands, Netdata will add both of them. The plugin should expose these commands on `templates` only when it wants to receive `enable` and `disable` commands for all the `jobs` of this `template`.
- `restart`, to restart a job.
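Putting the above together, a hypothetical collector could declare a template and one discovered job like this (the ids, path, sources and command lists below are illustrative only):
```
CONFIG go.d:postgresql:jobs create accepted template "/collectors" internal "internal" "schema add enable disable"
CONFIG go.d:postgresql:jobs:masterdb create running job "/collectors" discovered "dbhost:5432" "schema get update test remove enable disable restart"
```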
The plugin receives commands as if it had exposed a `FUNCTION` named `config`. Netdata formats all these calls like this:
> config id command
Where `id` is the unique id of the configurable entity and `command` is one of the supported commands the plugin sent to Netdata.
The plugin will receive (for commands: `schema`, `get`, `remove`, `enable` and `disable`):
```
FUNCTION transaction_id timeout "config id command"
```
or (for commands: `update`, `add` and `test`):
```
FUNCTION_PAYLOAD transaction_id timeout "config id command" "content/type"
body of the payload formatted according to content/type
FUNCTION_PAYLOAD_END
```
Once received, the plugin should process it and respond accordingly.
Immediately after the plugin adds a configuration entity, if the commands `enable` and `disable` are supported by it, Netdata will send either `enable` or `disable` for it, based on the last user action, which has been persisted to disk.
Plugin responses follow the same format `FUNCTION` responses do:
```
FUNCTION_RESULT_BEGIN transaction_id http_response_code content/type expiration
body of the response formatted according to content/type
FUNCTION_RESULT_END
```
Successful responses (HTTP response code 200) to `schema` and `get` should send back the relevant JSON object.
All other responses should have the following response body:
```json
{
"status" : 404,
"message" : "some text"
}
```
The user interface presents the message to users, even when the response is successful (HTTP code 200).
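As a complete, hypothetical example of a `get` exchange (transaction id, timeout, job id and source are illustrative), the plugin could receive:
```
FUNCTION tr-1 120 "config go.d:postgresql:jobs:masterdb get" "user@10.0.0.1"
```
and respond with the current configuration values of that job, formatted according to its schema:
```
FUNCTION_RESULT_BEGIN tr-1 200 application/json 0
{ "host": "localhost", "port": 5432 }
FUNCTION_RESULT_END
```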
When responding to additions and updates, Netdata uses the following success response codes to derive additional information:
- `200` means the configuration has been accepted and is running.
- `202` means the configuration has been accepted but is not yet running. A subsequent `status` action will update it.
- `299` means the configuration has been accepted but a restart is required to apply it.
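For instance (ids and message body hypothetical), a plugin accepting an `add` that takes time to apply could respond with 202 and later report the job as running with a `status` action:
```
FUNCTION_RESULT_BEGIN tr-2 202 application/json 0
{ "status": 202, "message": "job scheduled to start" }
FUNCTION_RESULT_END
CONFIG go.d:postgresql:jobs:replica1 status running
```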
## Data collection
data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines

View file

@ -36,25 +36,29 @@ LABEL, 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_R
OVERWRITE, 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18
SET, 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19
VARIABLE, 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20
DYNCFG_ENABLE, 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21
DYNCFG_REGISTER_MODULE, 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22
DYNCFG_REGISTER_JOB, 103, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23
DYNCFG_RESET, 104, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24
REPORT_JOB_STATUS, 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25
DELETE_JOB, 111, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 26
CONFIG, 100, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21
#
# Streaming only keywords
#
CLAIMED_ID, 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 27
BEGIN2, 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28
SET2, 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29
END2, 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30
CLAIMED_ID, 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22
BEGIN2, 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23
SET2, 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24
END2, 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25
#
# Streaming Replication keywords
#
CHART_DEFINITION_END, 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 31
RBEGIN, 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32
RDSTATE, 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33
REND, 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34
RSET, 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35
RSSTATE, 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36
CHART_DEFINITION_END, 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26
RBEGIN, 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27
RDSTATE, 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28
REND, 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29
RSET, 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30
RSSTATE, 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31
#
# obsolete - do nothing commands
#
DYNCFG_ENABLE, 901, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32
DYNCFG_REGISTER_MODULE, 902, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33
DYNCFG_REGISTER_JOB, 903, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34
DYNCFG_RESET, 904, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35
REPORT_JOB_STATUS, 905, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36
DELETE_JOB, 906, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37

View file

@ -30,11 +30,11 @@
#endif
#define GPERF_PARSER_TOTAL_KEYWORDS 36
#define GPERF_PARSER_TOTAL_KEYWORDS 37
#define GPERF_PARSER_MIN_WORD_LENGTH 3
#define GPERF_PARSER_MAX_WORD_LENGTH 22
#define GPERF_PARSER_MIN_HASH_VALUE 3
#define GPERF_PARSER_MAX_HASH_VALUE 48
#define GPERF_PARSER_MIN_HASH_VALUE 7
#define GPERF_PARSER_MAX_HASH_VALUE 52
/* maximum key range = 46, duplicates = 0 */
#ifdef __GNUC__
@ -49,116 +49,122 @@ gperf_keyword_hash_function (register const char *str, register size_t len)
{
static unsigned char asso_values[] =
{
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 23, 29, 0, 0, 0,
0, 49, 9, 0, 49, 49, 20, 49, 0, 8,
49, 49, 1, 12, 49, 23, 6, 49, 2, 0,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
49, 49, 49, 49, 49, 49
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 6, 24, 3, 9, 6,
0, 53, 3, 27, 53, 53, 33, 53, 42, 0,
53, 53, 0, 30, 53, 12, 3, 53, 9, 0,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53
};
return len + asso_values[(unsigned char)str[1]] + asso_values[(unsigned char)str[0]];
}
static PARSER_KEYWORD gperf_keywords[] =
{
{(char*)0}, {(char*)0}, {(char*)0}, {(char*)0},
{(char*)0}, {(char*)0}, {(char*)0},
#line 30 "gperf-config.txt"
{"END", 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13},
#line 51 "gperf-config.txt"
{"END2", 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30},
#line 58 "gperf-config.txt"
{"REND", 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34},
#line 17 "gperf-config.txt"
{"EXIT", 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3},
#line 16 "gperf-config.txt"
{"DISABLE", 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2},
#line 57 "gperf-config.txt"
{"RDSTATE", 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33},
#line 29 "gperf-config.txt"
{"DIMENSION", 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12},
#line 44 "gperf-config.txt"
{"DELETE_JOB", 111, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 26},
{(char*)0},
#line 42 "gperf-config.txt"
{"DYNCFG_RESET", 104, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24},
#line 39 "gperf-config.txt"
{"DYNCFG_ENABLE", 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21},
#line 26 "gperf-config.txt"
{"CHART", 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9},
#line 37 "gperf-config.txt"
{"SET", 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19},
#line 50 "gperf-config.txt"
{"SET2", 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29},
#line 59 "gperf-config.txt"
{"RSET", 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35},
#line 43 "gperf-config.txt"
{"REPORT_JOB_STATUS", 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25},
#line 41 "gperf-config.txt"
{"DYNCFG_REGISTER_JOB", 103, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23},
#line 60 "gperf-config.txt"
{"RSSTATE", 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36},
#line 18 "gperf-config.txt"
{"HOST", 71, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4},
#line 40 "gperf-config.txt"
{"DYNCFG_REGISTER_MODULE", 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22},
{(char*)0},
#line 39 "gperf-config.txt"
{"CONFIG", 100, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21},
#line 53 "gperf-config.txt"
{"REND", 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29},
#line 26 "gperf-config.txt"
{"CHART", 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9},
#line 36 "gperf-config.txt"
{"OVERWRITE", 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18},
{(char*)0},
#line 15 "gperf-config.txt"
{"FLUSH", 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1},
#line 27 "gperf-config.txt"
{"CLABEL", 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10},
#line 21 "gperf-config.txt"
{"HOST_LABEL", 74, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7},
#line 19 "gperf-config.txt"
{"HOST_DEFINE", 72, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5},
#line 55 "gperf-config.txt"
{"CHART_DEFINITION_END", 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 31},
#line 48 "gperf-config.txt"
{"CLAIMED_ID", 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 27},
#line 31 "gperf-config.txt"
{"FUNCTION", 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14},
#line 20 "gperf-config.txt"
{"HOST_DEFINE_END", 73, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6},
#line 28 "gperf-config.txt"
{"CLABEL_COMMIT", 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11},
#line 25 "gperf-config.txt"
{"BEGIN", 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8},
#line 49 "gperf-config.txt"
{"BEGIN2", 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28},
#line 56 "gperf-config.txt"
{"RBEGIN", 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32},
{(char*)0},
#line 52 "gperf-config.txt"
{"RDSTATE", 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28},
#line 38 "gperf-config.txt"
{"VARIABLE", 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20},
{(char*)0}, {(char*)0},
#line 20 "gperf-config.txt"
{"HOST_DEFINE_END", 73, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6},
#line 17 "gperf-config.txt"
{"EXIT", 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3},
#line 31 "gperf-config.txt"
{"FUNCTION", 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14},
#line 62 "gperf-config.txt"
{"DYNCFG_RESET", 904, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35},
#line 59 "gperf-config.txt"
{"DYNCFG_ENABLE", 901, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32},
#line 63 "gperf-config.txt"
{"REPORT_JOB_STATUS", 905, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36},
{(char*)0},
#line 64 "gperf-config.txt"
{"DELETE_JOB", 906, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37},
#line 50 "gperf-config.txt"
{"CHART_DEFINITION_END", 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26},
{(char*)0},
#line 61 "gperf-config.txt"
{"DYNCFG_REGISTER_JOB", 903, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34},
#line 33 "gperf-config.txt"
{"FUNCTION_PROGRESS", 43, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16},
{(char*)0}, {(char*)0}, {(char*)0},
#line 51 "gperf-config.txt"
{"RBEGIN", 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27},
#line 60 "gperf-config.txt"
{"DYNCFG_REGISTER_MODULE", 902, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33},
{(char*)0},
#line 32 "gperf-config.txt"
{"FUNCTION_RESULT_BEGIN", 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15},
{(char*)0}, {(char*)0}, {(char*)0},
#line 54 "gperf-config.txt"
{"RSET", 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30},
#line 25 "gperf-config.txt"
{"BEGIN", 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8},
#line 44 "gperf-config.txt"
{"BEGIN2", 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23},
#line 55 "gperf-config.txt"
{"RSSTATE", 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31},
#line 15 "gperf-config.txt"
{"FLUSH", 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1},
#line 37 "gperf-config.txt"
{"SET", 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19},
#line 45 "gperf-config.txt"
{"SET2", 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24},
{(char*)0},
#line 27 "gperf-config.txt"
{"CLABEL", 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10},
#line 16 "gperf-config.txt"
{"DISABLE", 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2},
#line 35 "gperf-config.txt"
{"LABEL", 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17}
{"LABEL", 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17},
#line 29 "gperf-config.txt"
{"DIMENSION", 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12},
#line 43 "gperf-config.txt"
{"CLAIMED_ID", 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22},
{(char*)0}, {(char*)0},
#line 28 "gperf-config.txt"
{"CLABEL_COMMIT", 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11},
{(char*)0},
#line 30 "gperf-config.txt"
{"END", 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13},
#line 46 "gperf-config.txt"
{"END2", 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25}
};
PARSER_KEYWORD *

View file

@ -10,14 +10,6 @@
#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
#define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0
#define PLUGINSD_KEYWORD_DYNCFG_ENABLE "DYNCFG_ENABLE"
#define PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE "DYNCFG_REGISTER_MODULE"
#define PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB "DYNCFG_REGISTER_JOB"
#define PLUGINSD_KEYWORD_DYNCFG_RESET "DYNCFG_RESET"
#define PLUGINSD_KEYWORD_REPORT_JOB_STATUS "REPORT_JOB_STATUS"
#define PLUGINSD_KEYWORD_DELETE_JOB "DELETE_JOB"
#define PLUGINSD_MAX_DIRECTORIES 20
extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
@ -47,9 +39,6 @@ struct plugind {
time_t started_t;
const DICTIONARY_ITEM *cfg_dict_item;
struct configurable_plugin *configuration;
struct plugind *prev;
struct plugind *next;
};

View file

@ -2,584 +2,62 @@
#include "pluginsd_dyncfg.h"
struct mutex_cond {
pthread_mutex_t lock;
pthread_cond_t cond;
int rc;
};
static void virt_fnc_got_data_cb(BUFFER *wb __maybe_unused, int code, void *callback_data)
{
struct mutex_cond *ctx = callback_data;
pthread_mutex_lock(&ctx->lock);
ctx->rc = code;
pthread_cond_broadcast(&ctx->cond);
pthread_mutex_unlock(&ctx->lock);
}
// ----------------------------------------------------------------------------
#define VIRT_FNC_TIMEOUT_S 10
#define VIRT_FNC_BUF_SIZE (4096)
void call_virtual_function_async(BUFFER *wb, RRDHOST *host, const char *name, const char *payload, rrd_function_result_callback_t callback, void *callback_data) {
PARSER *parser = NULL;
PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser) {
RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CONFIG);
if(!host) return PARSER_RC_ERROR;
//TODO simplify (we really only need the first parameter, the plugin name, so we may be able to avoid parsing everything)
char *words[PLUGINSD_MAX_WORDS];
char *function_with_params = strdupz(name);
size_t num_words = quoted_strings_splitter(function_with_params, words, PLUGINSD_MAX_WORDS, isspace_map_pluginsd);
size_t i = 1;
char *id = get_word(words, num_words, i++);
char *action = get_word(words, num_words, i++);
if (num_words < 2) {
netdata_log_error("PLUGINSD: virtual function name is empty.");
freez(function_with_params);
return;
if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_CREATE) == 0) {
char *status_str = get_word(words, num_words, i++);
char *type_str = get_word(words, num_words, i++);
char *path = get_word(words, num_words, i++);
char *source_type_str = get_word(words, num_words, i++);
char *source = get_word(words, num_words, i++);
char *supported_cmds_str = get_word(words, num_words, i++);
DYNCFG_STATUS status = dyncfg_status2id(status_str);
DYNCFG_TYPE type = dyncfg_type2id(type_str);
DYNCFG_SOURCE_TYPE source_type = dyncfg_source_type2id(source_type_str);
DYNCFG_CMDS cmds = dyncfg_cmds2id(supported_cmds_str);
if(!dyncfg_add_low_level(
host,
id,
path,
status,
type,
source_type,
source,
cmds,
0,
0,
false,
pluginsd_function_execute_cb,
parser))
return PARSER_RC_ERROR;
}
const DICTIONARY_ITEM *cpi = dictionary_get_and_acquire_item(host->configurable_plugins, get_word(words, num_words, 1));
if (unlikely(cpi == NULL)) {
netdata_log_error("PLUGINSD: virtual function plugin '%s' not found.", name);
freez(function_with_params);
return;
else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_DELETE) == 0) {
dyncfg_del_low_level(host, id);
}
struct configurable_plugin *cp = dictionary_acquired_item_value(cpi);
parser = (PARSER *)cp->cb_usr_ctx;
BUFFER *function_out = buffer_create(VIRT_FNC_BUF_SIZE, NULL);
// if we are forwarding this to a plugin (as opposed to streaming/child) we have to remove the first parameter (plugin_name)
buffer_strcat(function_out, get_word(words, num_words, 0));
for (size_t i = 1; i < num_words; i++) {
if (i == 1 && SERVING_PLUGINSD(parser))
continue;
buffer_sprintf(function_out, " %s", get_word(words, num_words, i));
else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS) == 0) {
char *status_str = get_word(words, num_words, i++);
dyncfg_status_low_level(host, id, dyncfg_status2id(status_str));
}
freez(function_with_params);
usec_t now_ut = now_monotonic_usec();
struct inflight_function tmp = {
.started_monotonic_ut = now_ut,
.result_body_wb = wb,
.timeout_s = VIRT_FNC_TIMEOUT_S,
.function = string_strdupz(buffer_tostring(function_out)),
.payload = payload != NULL ? strdupz(payload) : NULL,
.virtual = true,
.result = {
.cb = callback,
.data = callback_data,
},
.dyncfg = {
.stop_monotonic_ut = now_ut + VIRT_FNC_TIMEOUT_S * USEC_PER_SEC,
}
};
tmp.stop_monotonic_ut = &tmp.dyncfg.stop_monotonic_ut;
buffer_free(function_out);
uuid_generate_time(tmp.transaction);
char key[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(tmp.transaction, key);
dictionary_write_lock(parser->inflight.functions);
// if there is any error, our dictionary callbacks will call the caller callback to notify
// the caller about the error - no need for error handling here.
dictionary_set(parser->inflight.functions, key, &tmp, sizeof(struct inflight_function));
if(!parser->inflight.smaller_monotonic_timeout_ut || *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
// garbage collect stale inflight functions
if(parser->inflight.smaller_monotonic_timeout_ut < now_ut)
pluginsd_inflight_functions_garbage_collect(parser, now_ut);
dictionary_write_unlock(parser->inflight.functions);
}
dyncfg_config_t call_virtual_function_blocking(PARSER *parser, const char *name, int *rc, const char *payload) {
usec_t now_ut = now_monotonic_usec();
BUFFER *wb = buffer_create(VIRT_FNC_BUF_SIZE, NULL);
struct mutex_cond cond = {
.lock = PTHREAD_MUTEX_INITIALIZER,
.cond = PTHREAD_COND_INITIALIZER
};
struct inflight_function tmp = {
.started_monotonic_ut = now_ut,
.result_body_wb = wb,
.timeout_s = VIRT_FNC_TIMEOUT_S,
.function = string_strdupz(name),
.payload = payload != NULL ? strdupz(payload) : NULL,
.virtual = true,
.result = {
.cb = virt_fnc_got_data_cb,
.data = &cond,
},
.dyncfg = {
.stop_monotonic_ut = now_ut + VIRT_FNC_TIMEOUT_S * USEC_PER_SEC,
}
};
tmp.stop_monotonic_ut = &tmp.dyncfg.stop_monotonic_ut;
uuid_generate_time(tmp.transaction);
char key[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(tmp.transaction, key);
dictionary_write_lock(parser->inflight.functions);
// if there is any error, our dictionary callbacks will call the caller callback to notify
// the caller about the error - no need for error handling here.
dictionary_set(parser->inflight.functions, key, &tmp, sizeof(struct inflight_function));
if(!parser->inflight.smaller_monotonic_timeout_ut || *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
// garbage collect stale inflight functions
if(parser->inflight.smaller_monotonic_timeout_ut < now_ut)
pluginsd_inflight_functions_garbage_collect(parser, now_ut);
dictionary_write_unlock(parser->inflight.functions);
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
tp.tv_sec += (time_t)VIRT_FNC_TIMEOUT_S;
pthread_mutex_lock(&cond.lock);
int ret = pthread_cond_timedwait(&cond.cond, &cond.lock, &tp);
if (ret == ETIMEDOUT)
netdata_log_error("PLUGINSD: DYNCFG virtual function %s timed out", name);
pthread_mutex_unlock(&cond.lock);
dyncfg_config_t cfg;
cfg.data = strdupz(buffer_tostring(wb));
cfg.data_size = buffer_strlen(wb);
if (rc != NULL)
*rc = cond.rc;
buffer_free(wb);
return cfg;
}
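The blocking wrapper above parks the caller on a condition variable until virt_fnc_got_data_cb() signals completion, or the timeout expires. A minimal self-contained sketch of that rendezvous pattern, with illustrative names (not netdata APIs):
#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <stdbool.h>
struct waiter {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int rc;     // result code filled in by the completion callback
    bool done;  // completion flag, guards against spurious wakeups
};
// completion side: publish the result and wake the sleeping caller
static void waiter_complete(struct waiter *w, int rc) {
    pthread_mutex_lock(&w->lock);
    w->rc = rc;
    w->done = true;
    pthread_cond_broadcast(&w->cond);
    pthread_mutex_unlock(&w->lock);
}
// caller side: block for up to timeout_s seconds waiting for waiter_complete()
static int waiter_wait(struct waiter *w, int timeout_s) {
    struct timespec tp;
    clock_gettime(CLOCK_REALTIME, &tp); // pthread_cond_timedwait() takes an absolute deadline
    tp.tv_sec += (time_t)timeout_s;
    pthread_mutex_lock(&w->lock);
    int ret = 0;
    while(!w->done && ret != ETIMEDOUT) // loop: timedwait may wake spuriously
        ret = pthread_cond_timedwait(&w->cond, &w->lock, &tp);
    pthread_mutex_unlock(&w->lock);
    return w->done ? w->rc : -1; // -1 here simply means "timed out"
}
Unlike the sketch, the code above calls pthread_cond_timedwait() only once, so a spurious wakeup can return before the callback has delivered a result; looping on a completion flag avoids that.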
#define CVF_MAX_LEN (1024)
static dyncfg_config_t get_plugin_config_cb(void *usr_ctx, const char *plugin_name)
{
PARSER *parser = usr_ctx;
if (SERVING_STREAMING(parser)) {
char buf[CVF_MAX_LEN + 1];
snprintfz(buf, CVF_MAX_LEN, FUNCTION_NAME_GET_PLUGIN_CONFIG " %s", plugin_name);
return call_virtual_function_blocking(parser, buf, NULL, NULL);
}
return call_virtual_function_blocking(parser, FUNCTION_NAME_GET_PLUGIN_CONFIG, NULL, NULL);
}
static dyncfg_config_t get_plugin_config_schema_cb(void *usr_ctx, const char *plugin_name)
{
PARSER *parser = usr_ctx;
if (SERVING_STREAMING(parser)) {
char buf[CVF_MAX_LEN + 1];
snprintfz(buf, CVF_MAX_LEN, FUNCTION_NAME_GET_PLUGIN_CONFIG_SCHEMA " %s", plugin_name);
return call_virtual_function_blocking(parser, buf, NULL, NULL);
}
return call_virtual_function_blocking(parser, "get_plugin_config_schema", NULL, NULL);
}
static dyncfg_config_t get_module_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_GET_MODULE_CONFIG);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s", module_name);
dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
buffer_free(wb);
return ret;
}
static dyncfg_config_t get_module_config_schema_cb(void *usr_ctx, const char *plugin_name, const char *module_name)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_GET_MODULE_CONFIG_SCHEMA);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s", module_name);
dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
buffer_free(wb);
return ret;
}
static dyncfg_config_t get_job_config_schema_cb(void *usr_ctx, const char *plugin_name, const char *module_name)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_GET_JOB_CONFIG_SCHEMA);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s", module_name);
dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
buffer_free(wb);
return ret;
}
static dyncfg_config_t get_job_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, const char* job_name)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_GET_JOB_CONFIG);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s %s", module_name, job_name);
dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
buffer_free(wb);
return ret;
}
enum set_config_result set_plugin_config_cb(void *usr_ctx, const char *plugin_name, dyncfg_config_t *cfg)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_SET_PLUGIN_CONFIG);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
int rc;
call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data);
buffer_free(wb);
if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
return SET_CONFIG_REJECTED;
return SET_CONFIG_ACCEPTED;
}
enum set_config_result set_module_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, dyncfg_config_t *cfg)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_SET_MODULE_CONFIG);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s", module_name);
int rc;
call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data);
buffer_free(wb);
if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
return SET_CONFIG_REJECTED;
return SET_CONFIG_ACCEPTED;
}
enum set_config_result set_job_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, const char *job_name, dyncfg_config_t *cfg)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_SET_JOB_CONFIG);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s %s", module_name, job_name);
int rc;
call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data);
buffer_free(wb);
if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
return SET_CONFIG_REJECTED;
return SET_CONFIG_ACCEPTED;
}
enum set_config_result delete_job_cb(void *usr_ctx, const char *plugin_name ,const char *module_name, const char *job_name)
{
PARSER *parser = usr_ctx;
BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
buffer_strcat(wb, FUNCTION_NAME_DELETE_JOB);
if (SERVING_STREAMING(parser))
buffer_sprintf(wb, " %s", plugin_name);
buffer_sprintf(wb, " %s %s", module_name, job_name);
int rc;
call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, NULL);
buffer_free(wb);
if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
return SET_CONFIG_REJECTED;
return SET_CONFIG_ACCEPTED;
}
PARSER_RC pluginsd_register_plugin(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
netdata_log_info("PLUGINSD: DYNCFG_ENABLE");
if (unlikely (num_words != 2))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_ENABLE, "missing name parameter");
struct configurable_plugin *cfg = callocz(1, sizeof(struct configurable_plugin));
cfg->name = strdupz(words[1]);
cfg->set_config_cb = set_plugin_config_cb;
cfg->get_config_cb = get_plugin_config_cb;
cfg->get_config_schema_cb = get_plugin_config_schema_cb;
cfg->cb_usr_ctx = parser;
const DICTIONARY_ITEM *di = register_plugin(parser->user.host->configurable_plugins, cfg, SERVING_PLUGINSD(parser));
if (unlikely(di == NULL)) {
freez(cfg->name);
freez(cfg);
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_ENABLE, "error registering plugin");
}
if (SERVING_PLUGINSD(parser)) {
// this is an optimization for pluginsd, to avoid an extra dictionary lookup,
// since we know which plugin is communicating with us
parser->user.cd->cfg_dict_item = di;
parser->user.cd->configuration = cfg;
} else {
// register_plugin keeps the item acquired, so we need to release it
dictionary_acquired_item_release(parser->user.host->configurable_plugins, di);
}
rrdpush_send_dyncfg_enable(parser->user.host, cfg->name);
return PARSER_RC_OK;
}
#define LOG_MSG_SIZE (1024)
#define MODULE_NAME_IDX (SERVING_PLUGINSD(parser) ? 1 : 2)
#define MODULE_TYPE_IDX (SERVING_PLUGINSD(parser) ? 2 : 3)
PARSER_RC pluginsd_register_module(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
netdata_log_info("PLUGINSD: DYNCFG_REG_MODULE");
size_t expected_num_words = SERVING_PLUGINSD(parser) ? 3 : 4;
if (unlikely(num_words != expected_num_words)) {
char log[LOG_MSG_SIZE + 1];
snprintfz(log, LOG_MSG_SIZE, "expected %zu (got %zu) parameters: %smodule_name module_type", expected_num_words - 1, num_words - 1, SERVING_PLUGINSD(parser) ? "" : "plugin_name ");
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, log);
}
struct configurable_plugin *plug_cfg;
const DICTIONARY_ITEM *di = NULL;
if (SERVING_PLUGINSD(parser)) {
plug_cfg = parser->user.cd->configuration;
if (unlikely(plug_cfg == NULL))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "you have to enable dynamic configuration first using " PLUGINSD_KEYWORD_DYNCFG_ENABLE);
} else {
di = dictionary_get_and_acquire_item(parser->user.host->configurable_plugins, words[1]);
if (unlikely(di == NULL))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "plugin not found");
plug_cfg = (struct configurable_plugin *)dictionary_acquired_item_value(di);
}
struct module *mod = callocz(1, sizeof(struct module));
mod->type = str2_module_type(words[MODULE_TYPE_IDX]);
if (unlikely(mod->type == MOD_TYPE_UNKNOWN)) {
freez(mod);
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "unknown module type (allowed: job_array, single)");
}
mod->name = strdupz(words[MODULE_NAME_IDX]);
mod->set_config_cb = set_module_config_cb;
mod->get_config_cb = get_module_config_cb;
mod->get_config_schema_cb = get_module_config_schema_cb;
mod->config_cb_usr_ctx = parser;
mod->get_job_config_cb = get_job_config_cb;
mod->get_job_config_schema_cb = get_job_config_schema_cb;
mod->set_job_config_cb = set_job_config_cb;
mod->delete_job_cb = delete_job_cb;
mod->job_config_cb_usr_ctx = parser;
register_module(parser->user.host->configurable_plugins, plug_cfg, mod, SERVING_PLUGINSD(parser));
if (di != NULL)
dictionary_acquired_item_release(parser->user.host->configurable_plugins, di);
rrdpush_send_dyncfg_reg_module(parser->user.host, plug_cfg->name, mod->name, mod->type);
return PARSER_RC_OK;
}
static inline PARSER_RC pluginsd_register_job_common(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused, const char *plugin_name) {
const char *module_name = words[0];
const char *job_name = words[1];
const char *job_type_str = words[2];
const char *flags_str = words[3];
long f = str2l(flags_str);
if (f < 0)
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "invalid flags received");
dyncfg_job_flg_t flags = f;
if (SERVING_PLUGINSD(parser))
flags |= JOB_FLG_PLUGIN_PUSHED;
else
flags |= JOB_FLG_STREAMING_PUSHED;
enum job_type job_type = dyncfg_str2job_type(job_type_str);
if (job_type == JOB_TYPE_UNKNOWN)
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "unknown job type");
if (SERVING_PLUGINSD(parser) && job_type == JOB_TYPE_USER)
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "plugins cannot push jobs of type \"user\" (this is allowed only in streaming)");
if (register_job(parser->user.host->configurable_plugins, plugin_name, module_name, job_name, job_type, flags, 0)) // ignore existing is off as this is explicitly called register job
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "error registering job");
rrdpush_send_dyncfg_reg_job(parser->user.host, plugin_name, module_name, job_name, job_type, flags);
nd_log(NDLS_COLLECTORS, NDLP_WARNING, "DYNCFG: unknown action '%s' received from plugin", action);
parser->user.data_collections_count++;
return PARSER_RC_OK;
}
PARSER_RC pluginsd_register_job(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
size_t expected_num_words = SERVING_PLUGINSD(parser) ? 5 : 6;
if (unlikely(num_words != expected_num_words)) {
char log[LOG_MSG_SIZE + 1];
snprintfz(log, LOG_MSG_SIZE, "expected %zu (got %zu) parameters: %smodule_name job_name job_type", expected_num_words - 1, num_words - 1, SERVING_PLUGINSD(parser) ? "" : "plugin_name ");
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, log);
}
if (SERVING_PLUGINSD(parser)) {
return pluginsd_register_job_common(&words[1], num_words - 1, parser, parser->user.cd->configuration->name);
}
return pluginsd_register_job_common(&words[2], num_words - 2, parser, words[1]);
}
PARSER_RC pluginsd_dyncfg_reset(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
if (unlikely(num_words != (SERVING_PLUGINSD(parser) ? 1 : 2)))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_RESET, SERVING_PLUGINSD(parser) ? "expected 0 parameters" : "expected 1 parameter: plugin_name");
if (SERVING_PLUGINSD(parser)) {
unregister_plugin(parser->user.host->configurable_plugins, parser->user.cd->cfg_dict_item);
rrdpush_send_dyncfg_reset(parser->user.host, parser->user.cd->configuration->name);
parser->user.cd->configuration = NULL;
} else {
const DICTIONARY_ITEM *di = dictionary_get_and_acquire_item(parser->user.host->configurable_plugins, words[1]);
if (unlikely(di == NULL))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_RESET, "plugin not found");
unregister_plugin(parser->user.host->configurable_plugins, di);
rrdpush_send_dyncfg_reset(parser->user.host, words[1]);
}
// ----------------------------------------------------------------------------
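// no-op handler: the legacy dyncfg keywords still parse (cases 901-906 in
// parser_execute() below), but they are intentionally ignored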
PARSER_RC pluginsd_dyncfg_noop(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
return PARSER_RC_OK;
}
static inline PARSER_RC pluginsd_job_status_common(char **words, size_t num_words, PARSER *parser, const char *plugin_name) {
int state = str2i(words[3]);
enum job_status status = str2job_state(words[2]);
if (unlikely(SERVING_PLUGINSD(parser) && status == JOB_STATUS_UNKNOWN))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "unknown job status");
char *message = NULL;
if (num_words == 5 && strlen(words[4]) > 0)
message = words[4];
const DICTIONARY_ITEM *plugin_item;
DICTIONARY *job_dict;
const DICTIONARY_ITEM *job_item = report_job_status_acq_lock(parser->user.host->configurable_plugins, &plugin_item, &job_dict, plugin_name, words[0], words[1], status, state, message);
if (job_item != NULL) {
struct job *job = dictionary_acquired_item_value(job_item);
rrdpush_send_job_status_update(parser->user.host, plugin_name, words[0], job);
pthread_mutex_unlock(&job->lock);
dictionary_acquired_item_release(job_dict, job_item);
dictionary_acquired_item_release(parser->user.host->configurable_plugins, plugin_item);
}
return PARSER_RC_OK;
}
// job_status [plugin_name if streaming] <module_name> <job_name> <status_code> <state> [message]
PARSER_RC pluginsd_job_status(char **words, size_t num_words, PARSER *parser) {
if (SERVING_PLUGINSD(parser)) {
if (unlikely(num_words != 5 && num_words != 6))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 4 or 5 parameters: module_name, job_name, status_code, state, [optional: message]");
} else {
if (unlikely(num_words != 6 && num_words != 7))
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 5 or 6 parameters: plugin_name, module_name, job_name, status_code, state, [optional: message]");
}
if (SERVING_PLUGINSD(parser)) {
return pluginsd_job_status_common(&words[1], num_words - 1, parser, parser->user.cd->configuration->name);
}
return pluginsd_job_status_common(&words[2], num_words - 2, parser, words[1]);
}
PARSER_RC pluginsd_delete_job(char **words, size_t num_words, PARSER *parser) {
// this may be confusing, but there is a difference between KEYWORD_DELETE_JOB
// and the actual delete_job function: they operate in opposite directions
if (num_words != 4)
return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DELETE_JOB, "expected 2 parameters: plugin_name, module_name, job_name");
const char *plugin_name = get_word(words, num_words, 1);
const char *module_name = get_word(words, num_words, 2);
const char *job_name = get_word(words, num_words, 3);
if (SERVING_STREAMING(parser))
delete_job_pname(parser->user.host->configurable_plugins, plugin_name, module_name, job_name);
// forward to parent if any
rrdpush_send_job_deleted(parser->user.host, plugin_name, module_name, job_name);
return PARSER_RC_OK;
}
void pluginsd_dyncfg_cleanup(PARSER *parser) {
if (parser->user.cd != NULL && parser->user.cd->configuration != NULL) {
unregister_plugin(parser->user.host->configurable_plugins, parser->user.cd->cfg_dict_item);
parser->user.cd->configuration = NULL;
} else if (parser->user.host != NULL && SERVING_STREAMING(parser) && parser->user.host != localhost){
dictionary_flush(parser->user.host->configurable_plugins);
}
}
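Given the parsing in pluginsd_config() above, and assuming the action macros expand to the lowercase words, the plugin-facing wire format looks like this (id, status, type, path, source and cmds values are all illustrative):

CONFIG systemd-journal:monitored-directories create running single /collectors/logs/systemd-journal internal internal "schema get update"
CONFIG systemd-journal:monitored-directories status running
CONFIG systemd-journal:monitored-directories delete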

View file

@ -5,13 +5,7 @@
#include "pluginsd_internals.h"
PARSER_RC pluginsd_register_plugin(char **words, size_t num_words, PARSER *parser);
PARSER_RC pluginsd_register_module(char **words, size_t num_words, PARSER *parser);
PARSER_RC pluginsd_register_job(char **words, size_t num_words, PARSER *parser);
PARSER_RC pluginsd_dyncfg_reset(char **words, size_t num_words, PARSER *parser);
PARSER_RC pluginsd_job_status(char **words, size_t num_words, PARSER *parser);
PARSER_RC pluginsd_delete_job(char **words, size_t num_words, PARSER *parser);
void pluginsd_dyncfg_cleanup(PARSER *parser);
PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser);
PARSER_RC pluginsd_dyncfg_noop(char **words, size_t num_words, PARSER *parser);
#endif //NETDATA_PLUGINSD_DYNCFG_H

View file

@ -21,47 +21,52 @@ static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void
if(rc != 0)
netdata_log_error("FUNCTION: '%s': cannot parse transaction UUID", string2str(pf->function));
char buffer[2048 + 1];
snprintfz(buffer, sizeof(buffer) - 1, "%s %s %d \"%s\"\n",
pf->payload ? PLUGINSD_KEYWORD_FUNCTION_PAYLOAD : PLUGINSD_KEYWORD_FUNCTION,
transaction,
pf->timeout_s,
string2str(pf->function));
CLEAN_BUFFER *buffer = buffer_create(1024, NULL);
if(pf->payload && buffer_strlen(pf->payload)) {
buffer_sprintf(
buffer,
PLUGINSD_KEYWORD_FUNCTION_PAYLOAD " %s %d \"%s\" \"%s\" \"%s\"\n",
transaction,
pf->timeout_s,
string2str(pf->function),
pf->source ? pf->source : "",
content_type_id2string(pf->payload->content_type)
);
buffer_fast_strcat(buffer, buffer_tostring(pf->payload), buffer_strlen(pf->payload));
buffer_strcat(buffer, "\nFUNCTION_PAYLOAD_END\n");
}
else {
buffer_sprintf(
buffer,
PLUGINSD_KEYWORD_FUNCTION " %s %d \"%s\" \"%s\"\n",
transaction,
pf->timeout_s,
string2str(pf->function),
pf->source ? pf->source : ""
);
}
// send the command to the plugin
ssize_t ret = send_to_plugin(buffer, parser);
// IMPORTANT: make sure all commands are sent in 1 call, because in streaming they may interfere with others
ssize_t ret = send_to_plugin(buffer_tostring(buffer), parser);
pf->sent_monotonic_ut = now_monotonic_usec();
if(ret < 0) {
pf->sent_successfully = false;
pf->code = HTTP_RESP_SERVICE_UNAVAILABLE;
netdata_log_error("FUNCTION '%s': failed to send it to the plugin, error %zd", string2str(pf->function), ret);
rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", HTTP_RESP_SERVICE_UNAVAILABLE);
rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", pf->code);
}
else {
pf->sent_successfully = true;
internal_error(LOG_FUNCTIONS,
"FUNCTION '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)",
string2str(pf->function), dictionary_acquired_item_name(item), ret,
pf->sent_monotonic_ut - pf->started_monotonic_ut);
string2str(pf->function), dictionary_acquired_item_name(item), ret,
pf->sent_monotonic_ut - pf->started_monotonic_ut);
}
if (!pf->payload)
return;
// send the payload to the plugin
ret = send_to_plugin(pf->payload, parser);
if(ret < 0) {
netdata_log_error("FUNCTION_PAYLOAD '%s': failed to send function to plugin, error %zd", string2str(pf->function), ret);
rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", HTTP_RESP_SERVICE_UNAVAILABLE);
}
else {
internal_error(LOG_FUNCTIONS,
"FUNCTION_PAYLOAD '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)",
string2str(pf->function), dictionary_acquired_item_name(item), ret,
pf->sent_monotonic_ut - pf->started_monotonic_ut);
}
send_to_plugin("\nFUNCTION_PAYLOAD_END\n", parser);
}
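With the new insert callback above, the whole request is serialized into one buffer and handed to send_to_plugin() in a single call, so a payload-carrying request reaches the plugin as one uninterrupted block, e.g. (transaction, timeout, function, source and body are illustrative):

FUNCTION_PAYLOAD 019d2c6a8f7b4e3a9c1d5e6f7a8b9c0d 10 "config systemd-journal:monitored-directories update" "dyncfg" "application/json"
{"journalDirectories":["/run/log/journal"]}
FUNCTION_PAYLOAD_END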
static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, void *new_func, void *parser_ptr __maybe_unused) {
@ -75,83 +80,22 @@ static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __m
return false;
}
static void delete_job_finalize(struct parser *parser __maybe_unused, struct configurable_plugin *plug, const char *fnc_sig, int code) {
if (code != DYNCFG_VFNC_RET_CFG_ACCEPTED)
return;
char *params_local = strdupz(fnc_sig);
char *words[DYNCFG_MAX_WORDS];
size_t words_c = quoted_strings_splitter(params_local, words, DYNCFG_MAX_WORDS, isspace_map_pluginsd);
if (words_c != 3) {
netdata_log_error("PLUGINSD_PARSER: invalid number of parameters for delete_job");
freez(params_local);
return;
}
const char *module = words[1];
const char *job = words[2];
delete_job(plug, module, job);
unlink_job(plug->name, module, job);
rrdpush_send_job_deleted(localhost, plug->name, module, job);
freez(params_local);
}
static void set_job_finalize(struct parser *parser __maybe_unused, struct configurable_plugin *plug __maybe_unused, const char *fnc_sig, int code) {
if (code != DYNCFG_VFNC_RET_CFG_ACCEPTED)
return;
char *params_local = strdupz(fnc_sig);
char *words[DYNCFG_MAX_WORDS];
size_t words_c = quoted_strings_splitter(params_local, words, DYNCFG_MAX_WORDS, isspace_map_pluginsd);
if (words_c != 3) {
netdata_log_error("PLUGINSD_PARSER: invalid number of parameters for set_job_config");
freez(params_local);
return;
}
const char *module_name = get_word(words, words_c, 1);
const char *job_name = get_word(words, words_c, 2);
if (register_job(parser->user.host->configurable_plugins, parser->user.cd->configuration->name, module_name, job_name, JOB_TYPE_USER, JOB_FLG_USER_CREATED, 1)) {
freez(params_local);
return;
}
// only send this if it is not existing already (register_job cares for that)
rrdpush_send_dyncfg_reg_job(localhost, parser->user.cd->configuration->name, module_name, job_name, JOB_TYPE_USER, JOB_FLG_USER_CREATED);
freez(params_local);
}
static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr) {
struct inflight_function *pf = func;
struct parser *parser = (struct parser *)parser_ptr;
struct parser *parser = (struct parser *)parser_ptr; (void)parser;
internal_error(LOG_FUNCTIONS,
"FUNCTION '%s' result of transaction '%s' received from collector (%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)",
string2str(pf->function), dictionary_acquired_item_name(item),
buffer_strlen(pf->result_body_wb), pf->sent_monotonic_ut - pf->started_monotonic_ut, now_realtime_usec() - pf->sent_monotonic_ut);
if (pf->virtual && SERVING_PLUGINSD(parser)) {
if (pf->payload) {
if (strncmp(string2str(pf->function), FUNCTION_NAME_SET_JOB_CONFIG, strlen(FUNCTION_NAME_SET_JOB_CONFIG)) == 0)
set_job_finalize(parser, parser->user.cd->configuration, string2str(pf->function), pf->code);
dyn_conf_store_config(string2str(pf->function), pf->payload, parser->user.cd->configuration);
} else if (strncmp(string2str(pf->function), FUNCTION_NAME_DELETE_JOB, strlen(FUNCTION_NAME_DELETE_JOB)) == 0) {
delete_job_finalize(parser, parser->user.cd->configuration, string2str(pf->function), pf->code);
}
}
"FUNCTION '%s' result of transaction '%s' received from collector "
"(%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)",
string2str(pf->function), dictionary_acquired_item_name(item),
buffer_strlen(pf->result_body_wb),
pf->sent_monotonic_ut - pf->started_monotonic_ut, now_realtime_usec() - pf->sent_monotonic_ut);
pf->result.cb(pf->result_body_wb, pf->code, pf->result.data);
string_freez(pf->function);
freez((void *)pf->payload);
buffer_free((void *)pf->payload);
freez((void *)pf->source);
}
void pluginsd_inflight_functions_init(PARSER *parser) {
@ -202,10 +146,8 @@ static void pluginsd_function_cancel(void *data) {
internal_error(true, "PLUGINSD: sending function cancellation to plugin for transaction '%s'", transaction);
char buffer[2048 + 1];
snprintfz(buffer, sizeof(buffer) - 1, "%s %s\n",
PLUGINSD_KEYWORD_FUNCTION_CANCEL,
transaction);
char buffer[2048];
snprintfz(buffer, sizeof(buffer), PLUGINSD_KEYWORD_FUNCTION_CANCEL " %s\n", transaction);
// send the command to the plugin
ssize_t ret = send_to_plugin(buffer, t->parser);
@ -232,10 +174,8 @@ static void pluginsd_function_progress_to_plugin(void *data) {
internal_error(true, "PLUGINSD: sending function progress to plugin for transaction '%s'", transaction);
char buffer[2048 + 1];
snprintfz(buffer, sizeof(buffer) - 1, "%s %s\n",
PLUGINSD_KEYWORD_FUNCTION_PROGRESS,
transaction);
char buffer[2048];
snprintfz(buffer, sizeof(buffer), PLUGINSD_KEYWORD_FUNCTION_PROGRESS " %s\n", transaction);
// send the command to the plugin
ssize_t ret = send_to_plugin(buffer, t->parser);
@ -254,42 +194,36 @@ static void pluginsd_function_progress_to_plugin(void *data) {
// this is the function called from
// rrd_call_function_and_wait() and rrd_call_function_async()
static int pluginsd_function_execute_cb(uuid_t *transaction, BUFFER *result_body_wb,
usec_t *stop_monotonic_ut, const char *function,
void *execute_cb_data,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb __maybe_unused,
void *is_cancelled_cb_data __maybe_unused,
rrd_function_register_canceller_cb_t register_canceller_cb,
void *register_canceller_cb_data,
rrd_function_register_progresser_cb_t register_progresser_cb,
void *register_progresser_cb_data) {
PARSER *parser = execute_cb_data;
int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data) {
// IMPORTANT: this function MUST call the result_cb even on failures
PARSER *parser = data;
usec_t now_ut = now_monotonic_usec();
int timeout_s = (*stop_monotonic_ut - now_ut + USEC_PER_SEC / 2) / USEC_PER_SEC;
int timeout_s = (int)((*rfe->stop_monotonic_ut - now_ut + USEC_PER_SEC / 2) / USEC_PER_SEC);
struct inflight_function tmp = {
.started_monotonic_ut = now_ut,
.stop_monotonic_ut = stop_monotonic_ut,
.result_body_wb = result_body_wb,
.stop_monotonic_ut = rfe->stop_monotonic_ut,
.result_body_wb = rfe->result.wb,
.timeout_s = timeout_s,
.function = string_strdupz(function),
.payload = NULL,
.function = string_strdupz(rfe->function),
.payload = buffer_dup(rfe->payload),
.source = rfe->source ? strdupz(rfe->source) : NULL,
.parser = parser,
.result = {
.cb = result_cb,
.data = result_cb_data,
.cb = rfe->result.cb,
.data = rfe->result.data,
},
.progress = {
.cb = progress_cb,
.data = progress_cb_data,
.cb = rfe->progress.cb,
.data = rfe->progress.data,
},
};
uuid_copy(tmp.transaction, *transaction);
uuid_copy(tmp.transaction, *rfe->transaction);
char transaction_str[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(tmp.transaction, transaction_str);
@ -298,24 +232,35 @@ static int pluginsd_function_execute_cb(uuid_t *transaction, BUFFER *result_body
// if there is any error, our dictionary callbacks will call the caller callback to notify
// the caller about the error - no need for error handling here.
void *t = dictionary_set(parser->inflight.functions, transaction_str, &tmp, sizeof(struct inflight_function));
if(register_canceller_cb)
register_canceller_cb(register_canceller_cb_data, pluginsd_function_cancel, t);
if(register_progresser_cb && (parser->repertoire == PARSER_INIT_PLUGINSD ||
(parser->repertoire == PARSER_INIT_STREAMING && stream_has_capability(&parser->user, STREAM_CAP_PROGRESS))))
register_progresser_cb(register_progresser_cb_data, pluginsd_function_progress_to_plugin, t);
if(!parser->inflight.smaller_monotonic_timeout_ut || *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
// garbage collect stale inflight functions
if(parser->inflight.smaller_monotonic_timeout_ut < now_ut)
struct inflight_function *t = dictionary_set(parser->inflight.functions, transaction_str, &tmp, sizeof(struct inflight_function));
if(!t->sent_successfully) {
int code = t->code;
dictionary_write_unlock(parser->inflight.functions);
dictionary_del(parser->inflight.functions, transaction_str);
pluginsd_inflight_functions_garbage_collect(parser, now_ut);
return code;
}
else {
if (rfe->register_canceller.cb)
rfe->register_canceller.cb(rfe->register_canceller.data, pluginsd_function_cancel, t);
dictionary_write_unlock(parser->inflight.functions);
if (rfe->register_progresser.cb &&
(parser->repertoire == PARSER_INIT_PLUGINSD || (parser->repertoire == PARSER_INIT_STREAMING &&
stream_has_capability(&parser->user, STREAM_CAP_PROGRESS))))
rfe->register_progresser.cb(rfe->register_progresser.data, pluginsd_function_progress_to_plugin, t);
return HTTP_RESP_OK;
if (!parser->inflight.smaller_monotonic_timeout_ut ||
*tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
// garbage collect stale inflight functions
if (parser->inflight.smaller_monotonic_timeout_ut < now_ut)
pluginsd_inflight_functions_garbage_collect(parser, now_ut);
dictionary_write_unlock(parser->inflight.functions);
return HTTP_RESP_OK;
}
}
PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser) {
@ -421,7 +366,7 @@ PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER
struct inflight_function *pf = inflight_function_find(parser, transaction);
if(pf) {
if(format && *format)
pf->result_body_wb->content_type = functions_format_to_content_type(format);
pf->result_body_wb->content_type = content_type_string2id(format);
pf->code = code;

View file

@ -11,13 +11,17 @@ struct inflight_function {
int code;
int timeout_s;
STRING *function;
BUFFER *payload;
const char *source;
BUFFER *result_body_wb;
usec_t *stop_monotonic_ut; // pointer to caller data
usec_t started_monotonic_ut;
usec_t sent_monotonic_ut;
const char *payload;
PARSER *parser;
bool virtual;
bool sent_successfully;
struct {
rrd_function_result_callback_t cb;
@ -28,10 +32,6 @@ struct inflight_function {
rrd_function_progress_cb_t cb;
void *data;
} progress;
struct {
usec_t stop_monotonic_ut;
} dyncfg;
};
PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser);
@ -42,4 +42,6 @@ void pluginsd_inflight_functions_init(PARSER *parser);
void pluginsd_inflight_functions_cleanup(PARSER *parser);
void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut);
int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data);
#endif //NETDATA_PLUGINSD_FUNCTIONS_H

View file

@ -94,7 +94,6 @@ void parser_destroy(PARSER *parser) {
if (unlikely(!parser))
return;
pluginsd_dyncfg_cleanup(parser);
pluginsd_inflight_functions_cleanup(parser);
freez(parser);

View file

@ -202,6 +202,7 @@ static inline PARSER_RC pluginsd_host_define_end(char **words __maybe_unused, si
false);
rrdhost_option_set(host, RRDHOST_OPTION_VIRTUAL_HOST);
dyncfg_host_init(host);
if(host->rrdlabels) {
rrdlabels_migrate_to_these(host->rrdlabels, parser->user.host_define.rrdlabels);
@ -1355,23 +1356,16 @@ PARSER_RC parser_execute(PARSER *parser, PARSER_KEYWORD *keyword, char **words,
case 99:
return pluginsd_exit(words, num_words, parser);
case 101:
return pluginsd_register_plugin(words, num_words, parser);
case 100:
return pluginsd_config(words, num_words, parser);
case 102:
return pluginsd_register_module(words, num_words, parser);
case 103:
return pluginsd_register_job(words, num_words, parser);
case 104:
return pluginsd_dyncfg_reset(words, num_words, parser);
case 110:
return pluginsd_job_status(words, num_words, parser);
case 111:
return pluginsd_delete_job(words, num_words, parser);
case 901:
case 902:
case 903:
case 904:
case 905:
case 906:
return pluginsd_dyncfg_noop(words, num_words, parser);
default:
break;

View file

@ -231,7 +231,7 @@ static inline int parser_action(PARSER *parser, char *input) {
rc = PARSER_RC_ERROR;
if(rc == PARSER_RC_ERROR) {
CLEAN_BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
CLEAN_BUFFER *wb = buffer_create(1024, NULL);
line_splitter_reconstruct_line(wb, &parser->line);
netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)",
command, parser->line.count, buffer_tostring(wb));

View file

@ -1033,17 +1033,7 @@ static void add_labels_to_disk(struct disk *d, RRDSET *st) {
rrdlabels_add(st->rrdlabels, "device_type", get_disk_type_string(d->type), RRDLABEL_SRC_AUTO);
}
static int diskstats_function_block_devices(uuid_t *transaction __maybe_unused, BUFFER *wb,
usec_t *stop_monotonic_ut __maybe_unused, const char *function __maybe_unused,
void *collector_data __maybe_unused,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb __maybe_unused, void *progress_cb_data __maybe_unused,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
void *register_canceller_cb_data __maybe_unused,
rrd_function_register_progresser_cb_t register_progresser_cb __maybe_unused,
void *register_progresser_cb_data __maybe_unused) {
static int diskstats_function_block_devices(BUFFER *wb, const char *function __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@ -1322,16 +1312,7 @@ static int diskstats_function_block_devices(uuid_t *transaction __maybe_unused,
buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
buffer_json_finalize(wb);
int response = HTTP_RESP_OK;
if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
buffer_flush(wb);
response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
}
if(result_cb)
result_cb(wb, response, result_cb_data);
return response;
return HTTP_RESP_OK;
}
static void diskstats_cleanup_disks() {
@ -1478,6 +1459,10 @@ int do_proc_diskstats(int update_every, usec_t dt) {
excluded_disks = simple_pattern_create(
config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "exclude disks", DEFAULT_EXCLUDED_DISKS), NULL,
SIMPLE_PATTERN_EXACT, true);
rrd_function_add_inline(localhost, NULL, "block-devices", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSTATS_HELP,
"top", HTTP_ACCESS_ANY, diskstats_function_block_devices);
}
// --------------------------------------------------------------------------
@ -1492,14 +1477,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
ff = procfile_readall(ff);
if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
static bool add_func = true;
if (add_func) {
rrd_function_add(localhost, NULL, "block-devices", 10, RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSTATS_HELP,
"top", HTTP_ACCESS_ANY, true,
diskstats_function_block_devices, NULL);
add_func = false;
}
size_t lines = procfile_lines(ff), l;
collected_number system_read_kb = 0, system_write_kb = 0;

View file

@ -473,17 +473,7 @@ static void netdev_rename_this_device(struct netdev *d) {
// ----------------------------------------------------------------------------
int netdev_function_net_interfaces(uuid_t *transaction __maybe_unused, BUFFER *wb,
usec_t *stop_monotonic_ut __maybe_unused, const char *function __maybe_unused,
void *collector_data __maybe_unused,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb __maybe_unused, void *progress_cb_data __maybe_unused,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
void *register_canceller_cb_data __maybe_unused,
rrd_function_register_progresser_cb_t register_progresser_cb __maybe_unused,
void *register_progresser_cb_data __maybe_unused) {
int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@ -1761,16 +1751,14 @@ void *netdev_main(void *ptr)
worker_register("NETDEV");
worker_register_job_name(0, "netdev");
if (getenv("KUBERNETES_SERVICE_HOST") != NULL && getenv("KUBERNETES_SERVICE_PORT") != NULL) {
if (getenv("KUBERNETES_SERVICE_HOST") != NULL && getenv("KUBERNETES_SERVICE_PORT") != NULL)
double_linked_device_collect_delay_secs = 300;
}
rrd_function_add_inline(localhost, NULL, "network-interfaces", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_NETDEV_HELP,
"top", HTTP_ACCESS_ANY, netdev_function_net_interfaces);
netdata_thread_cleanup_push(netdev_main_cleanup, ptr) {
rrd_collector_started();
rrd_function_add(localhost, NULL, "network-interfaces", 10, RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_NETDEV_HELP,
"top", HTTP_ACCESS_ANY,
true, netdev_function_net_interfaces, NULL);
usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
heartbeat_init(&hb);

View file

@ -83,8 +83,8 @@ struct journal_file {
#define ND_SD_JOURNAL_OPEN_FLAGS (0)
#define JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT (5 * USEC_PER_SEC) // assume always 5 seconds latency
#define JOURNAL_VS_REALTIME_DELTA_MAX_UT (2 * 60 * USEC_PER_SEC) // up to 2 minutes latency
#define JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT (5 * USEC_PER_SEC) // assume a 5-second latency
#define JOURNAL_VS_REALTIME_DELTA_MAX_UT (2 * 60 * USEC_PER_SEC) // up to 2 minutes of latency
extern DICTIONARY *journal_files_registry;
extern DICTIONARY *used_hashes_registry;
@ -114,21 +114,22 @@ usec_t journal_file_update_annotation_boot_id(sd_journal *j, struct journal_file
#define MAX_JOURNAL_DIRECTORIES 100
struct journal_directory {
char *path;
STRING *path;
};
extern struct journal_directory journal_directories[MAX_JOURNAL_DIRECTORIES];
void journal_init_files_and_directories(void);
void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled);
void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *payload, const char *source, void *data);
void journal_file_update_header(const char *filename, struct journal_file *jf);
void netdata_systemd_journal_message_ids_init(void);
void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused);
void netdata_systemd_journal_transform_message_id(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
void *journal_watcher_main(void *arg);
void journal_watcher_restart(void);
#ifdef ENABLE_SYSTEMD_DBUS
void function_systemd_units(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled);
void function_systemd_units(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *payload, const char *source, void *data);
#endif
static inline void send_newline_and_flush(void) {
@ -157,4 +158,6 @@ static inline bool parse_journal_field(const char *data, size_t data_length, con
return true;
}
void systemd_journal_dyncfg_init(struct functions_evloop_globals *wg);
#endif //NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H

View file

@ -0,0 +1,98 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "systemd-internals.h"
#define JOURNAL_DIRECTORIES_JSON_NODE "journalDirectories"
static int systemd_journal_directories_dyncfg_update(BUFFER *result, BUFFER *payload) {
if(!payload || !buffer_strlen(payload))
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "empty payload received");
CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(payload));
if(!jobj)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "cannot parse json payload");
struct json_object *journalDirectories;
json_object_object_get_ex(jobj, JOURNAL_DIRECTORIES_JSON_NODE, &journalDirectories);
size_t n_directories = json_object_array_length(journalDirectories);
size_t added = 0;
for(size_t i = 0; i < n_directories; i++) {
struct json_object *dir = json_object_array_get_idx(journalDirectories, i);
const char *s = json_object_get_string(dir);
if(s && *s) {
string_freez(journal_directories[added].path);
journal_directories[added++].path = string_strdupz(s);
}
}
if(!added)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "no directories in the payload");
else {
for(size_t i = added; i < MAX_JOURNAL_DIRECTORIES; i++) {
string_freez(journal_directories[i].path);
journal_directories[i].path = NULL;
}
}
return dyncfg_default_response(result, HTTP_RESP_OK, "applied");
}
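The update handler above accepts a JSON object carrying a journalDirectories array of strings and replaces the registry in place; any previously-registered slots beyond the submitted ones are cleared. An illustrative payload:

{ "journalDirectories": [ "/run/log/journal", "/var/log/journal" ] }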
static int systemd_journal_directories_dyncfg_get(BUFFER *wb) {
buffer_flush(wb);
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
buffer_json_member_add_array(wb, JOURNAL_DIRECTORIES_JSON_NODE);
for(size_t i = 0; i < MAX_JOURNAL_DIRECTORIES ;i++) {
if(!journal_directories[i].path)
break;
buffer_json_add_array_item_string(wb, string2str(journal_directories[i].path));
}
buffer_json_array_close(wb);
buffer_json_finalize(wb);
return HTTP_RESP_OK;
}
static int systemd_journal_directories_dyncfg_cb(const char *transaction,
const char *id,
DYNCFG_CMDS cmd,
BUFFER *payload,
usec_t *stop_monotonic_ut __maybe_unused,
bool *cancelled __maybe_unused,
BUFFER *result,
const char *source __maybe_unused,
void *data __maybe_unused) {
CLEAN_BUFFER *action = buffer_create(100, NULL);
dyncfg_cmds2buffer(cmd, action);
if(cmd == DYNCFG_CMD_GET)
return systemd_journal_directories_dyncfg_get(result);
if(cmd == DYNCFG_CMD_UPDATE)
return systemd_journal_directories_dyncfg_update(result, payload);
nd_log(NDLS_COLLECTORS, NDLP_ERR,
"DYNCFG: unhandled transaction '%s', id '%s' cmd '%s', payload: %s",
transaction, id, buffer_tostring(action), payload ? buffer_tostring(payload) : "");
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "the command is not handled by this plugin");
}
// ----------------------------------------------------------------------------
void systemd_journal_dyncfg_init(struct functions_evloop_globals *wg) {
functions_evloop_dyncfg_add(
wg,
"systemd-journal:monitored-directories",
"/collectors/logs/systemd-journal",
DYNCFG_STATUS_RUNNING,
DYNCFG_TYPE_SINGLE,
DYNCFG_SOURCE_TYPE_INTERNAL,
"internal",
DYNCFG_CMD_SCHEMA | DYNCFG_CMD_GET | DYNCFG_CMD_UPDATE,
systemd_journal_directories_dyncfg_cb,
NULL);
}
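For reference, when the functions event loop dispatches a get for this id, the callback registered above is invoked roughly like this (transaction string and timeout are illustrative):

CLEAN_BUFFER *result = buffer_create(0, NULL);
bool cancelled = false;
usec_t stop_ut = now_monotonic_usec() + 10 * USEC_PER_SEC;
int code = systemd_journal_directories_dyncfg_cb(
    "tx-0001", "systemd-journal:monitored-directories",
    DYNCFG_CMD_GET, NULL, &stop_ut, &cancelled, result, "internal", NULL);
// on success, code is HTTP_RESP_OK and result holds {"journalDirectories":[...]}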

View file

@ -715,7 +715,7 @@ void journal_files_registry_update(void) {
for(unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES; i++) {
if(!journal_directories[i].path) break;
journal_directory_scan_recursively(files, dirs, journal_directories[i].path, 0);
journal_directory_scan_recursively(files, dirs, string2str(journal_directories[i].path), 0);
}
const char **array = mallocz(sizeof(const char *) * dictionary_entries(files));
@ -819,15 +819,15 @@ void journal_init_files_and_directories(void) {
// ------------------------------------------------------------------------
// setup the journal directories
journal_directories[d++].path = strdupz("/run/log/journal");
journal_directories[d++].path = strdupz("/var/log/journal");
journal_directories[d++].path = string_strdupz("/run/log/journal");
journal_directories[d++].path = string_strdupz("/var/log/journal");
if(*netdata_configured_host_prefix) {
char path[PATH_MAX];
snprintfz(path, sizeof(path), "%s/var/log/journal", netdata_configured_host_prefix);
journal_directories[d++].path = strdupz(path);
journal_directories[d++].path = string_strdupz(path);
snprintfz(path, sizeof(path), "%s/run/log/journal", netdata_configured_host_prefix);
journal_directories[d++].path = strdupz(path);
journal_directories[d++].path = string_strdupz(path);
}
// terminate the list

View file

@ -292,8 +292,16 @@ static void process_pending(Watcher *watcher) {
dictionary_garbage_collect(watcher->pending);
}
size_t journal_watcher_wanted_session_id = 0;
void journal_watcher_restart(void) {
__atomic_add_fetch(&journal_watcher_wanted_session_id, 1, __ATOMIC_RELAXED);
}
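// the restart mechanism is a generation counter: journal_watcher_restart() bumps
// the wanted session id, the inner loop below (which captured the old value)
// exits, and the outer while(1) rebuilds all inotify watches from scratch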
void *journal_watcher_main(void *arg __maybe_unused) {
while(1) {
size_t journal_watcher_session_id = journal_watcher_wanted_session_id;
Watcher watcher = {
.watchList = mallocz(INITIAL_WATCHES * sizeof(WatchEntry)),
.freeList = NULL,
@ -312,12 +320,12 @@ void *journal_watcher_main(void *arg __maybe_unused) {
for (unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES; i++) {
if (!journal_directories[i].path) break;
watch_directory_and_subdirectories(&watcher, inotifyFd, journal_directories[i].path);
watch_directory_and_subdirectories(&watcher, inotifyFd, string2str(journal_directories[i].path));
}
usec_t last_headers_update_ut = now_monotonic_usec();
struct buffered_reader reader;
while (1) {
while (journal_watcher_session_id == __atomic_load_n(&journal_watcher_wanted_session_id, __ATOMIC_RELAXED)) {
buffered_reader_ret_t rc = buffered_reader_read_timeout(
&reader, inotifyFd, SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS, false);
@ -372,7 +380,7 @@ void *journal_watcher_main(void *arg __maybe_unused) {
// this will scan the directories and cleanup the registry
journal_files_registry_update();
sleep_usec(5 * USEC_PER_SEC);
sleep_usec(2 * USEC_PER_SEC);
}
return NULL;

View file

@ -1520,7 +1520,8 @@ static void netdata_systemd_journal_function_help(const char *transaction) {
buffer_free(wb);
}
void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled) {
void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled,
BUFFER *payload __maybe_unused, const char *source __maybe_unused, void *data __maybe_unused) {
fstat_thread_calls = 0;
fstat_thread_cached_responses = 0;

View file

@ -11,7 +11,7 @@ static bool plugin_should_exit = false;
static bool journal_data_direcories_exist() {
struct stat st;
for (unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES && journal_directories[i].path; i++) {
if ((stat(journal_directories[i].path, &st) == 0) && S_ISDIR(st.st_mode))
if ((stat(string2str(journal_directories[i].path), &st) == 0) && S_ISDIR(st.st_mode))
return true;
}
return false;
@ -49,7 +49,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all";
// char buf[] = "systemd-journal after:1695332964 before:1695937764 direction:backward last:100 slice:true source:all DHKucpqUoe1:PtVoyIuX.MU";
// char buf[] = "systemd-journal after:1694511062 before:1694514662 anchor:1694514122024403";
function_systemd_journal("123", buf, &stop_monotonic_ut, &cancelled);
function_systemd_journal("123", buf, &stop_monotonic_ut, &cancelled, NULL, NULL, NULL);
// function_systemd_units("123", "systemd-units", 600, &cancelled);
exit(1);
}
@ -57,7 +57,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
if(argc == 2 && strcmp(argv[1], "debug-units") == 0) {
bool cancelled = false;
usec_t stop_monotonic_ut = now_monotonic_usec() + 600 * USEC_PER_SEC;
function_systemd_units("123", "systemd-units", &stop_monotonic_ut, &cancelled);
function_systemd_units("123", "systemd-units", &stop_monotonic_ut, &cancelled, NULL, NULL, NULL);
exit(1);
}
#endif
@ -75,14 +75,22 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
struct functions_evloop_globals *wg =
functions_evloop_init(SYSTEMD_JOURNAL_WORKER_THREADS, "SDJ", &stdout_mutex, &plugin_should_exit);
functions_evloop_add_function(wg, SYSTEMD_JOURNAL_FUNCTION_NAME, function_systemd_journal,
SYSTEMD_JOURNAL_DEFAULT_TIMEOUT);
functions_evloop_add_function(wg,
SYSTEMD_JOURNAL_FUNCTION_NAME,
function_systemd_journal,
SYSTEMD_JOURNAL_DEFAULT_TIMEOUT,
NULL);
#ifdef ENABLE_SYSTEMD_DBUS
functions_evloop_add_function(wg, SYSTEMD_UNITS_FUNCTION_NAME, function_systemd_units,
SYSTEMD_UNITS_DEFAULT_TIMEOUT);
functions_evloop_add_function(wg,
SYSTEMD_UNITS_FUNCTION_NAME,
function_systemd_units,
SYSTEMD_UNITS_DEFAULT_TIMEOUT,
NULL);
#endif
systemd_journal_dyncfg_init(wg);
// ------------------------------------------------------------------------
// register functions to netdata
@ -106,7 +114,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
usec_t step_ut = 100 * USEC_PER_MS;
usec_t send_newline_ut = 0;
usec_t since_last_scan_ut = SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start
bool tty = isatty(fileno(stderr)) == 1;
bool tty = isatty(fileno(stdout)) == 1;
heartbeat_t hb;
heartbeat_init(&hb);

View file

@ -1596,7 +1596,9 @@ void systemd_units_assign_priority(UnitInfo *base) {
}
}
void function_systemd_units(const char *transaction, char *function, usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused) {
void function_systemd_units(const char *transaction, char *function,
usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused,
BUFFER *payload __maybe_unused, const char *source __maybe_unused, void *data __maybe_unused) {
char *words[SYSTEMD_UNITS_MAX_PARAMS] = { NULL };
size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_UNITS_MAX_PARAMS);
for(int i = 1; i < SYSTEMD_UNITS_MAX_PARAMS ;i++) {

View file

@ -34,6 +34,8 @@
// ----------------------------------------------------------------------------
// netdata include files
#include "daemon/config/dyncfg.h"
#include "global_statistics.h"
// the netdata database

View file

@ -0,0 +1,83 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg-internals.h"
#include "dyncfg.h"
// ----------------------------------------------------------------------------
// echo is how we send requests to plugins when there is no external caller
// it is used for:
// 1. the first enable/disable requests we send
// 2. updates to stock or user configurations
// 3. saved dynamic jobs we need to add to templates
struct dyncfg_echo {
const DICTIONARY_ITEM *item;
DYNCFG *df;
BUFFER *wb;
};
void dyncfg_echo_cb(BUFFER *wb __maybe_unused, int code, void *result_cb_data) {
struct dyncfg_echo *e = result_cb_data;
buffer_free(e->wb);
dictionary_acquired_item_release(dyncfg_globals.nodes, e->item);
e->wb = NULL;
e->df = NULL;
e->item = NULL;
freez(e);
}
void dyncfg_echo(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id __maybe_unused, DYNCFG_CMDS cmd) {
if(!(df->cmds & cmd))
return;
const char *cmd_str = dyncfg_id2cmd_one(cmd);
if(!cmd_str) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: command given does not resolve to a known command");
return;
}
struct dyncfg_echo *e = callocz(1, sizeof(struct dyncfg_echo));
e->item = dictionary_acquired_item_dup(dyncfg_globals.nodes, item);
e->wb = buffer_create(0, NULL);
e->df = df;
char buf[string_strlen(df->function) + strlen(cmd_str) + 20];
snprintfz(buf, sizeof(buf), "%s %s", string2str(df->function), cmd_str);
rrd_function_run(df->host, e->wb, 10, HTTP_ACCESS_ADMIN, buf, false, NULL,
dyncfg_echo_cb, e,
NULL, NULL,
NULL, NULL,
NULL, NULL);
}
static void dyncfg_echo_payload(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id __maybe_unused, const char *cmd) {
if(!df->payload)
return;
struct dyncfg_echo *e = callocz(1, sizeof(struct dyncfg_echo));
e->item = dictionary_acquired_item_dup(dyncfg_globals.nodes, item);
e->wb = buffer_create(0, NULL);
e->df = df;
char buf[string_strlen(df->function) + strlen(cmd) + 20];
snprintfz(buf, sizeof(buf), "%s %s", string2str(df->function), cmd);
rrd_function_run(df->host, e->wb, 10, HTTP_ACCESS_ADMIN, buf, false, NULL,
dyncfg_echo_cb, e,
NULL, NULL,
NULL, NULL,
df->payload, NULL);
}
void dyncfg_echo_update(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id) {
dyncfg_echo_payload(item, df, id, "update");
}
void dyncfg_echo_add(const DICTIONARY_ITEM *template_item, DYNCFG *template_df, const char *template_id, const char *job_name) {
char buf[strlen(job_name) + 20];
snprintfz(buf, sizeof(buf), "add %s", job_name);
dyncfg_echo_payload(template_item, template_df, template_id, buf);
}
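
A minimal usage sketch for these echo helpers (hedged: the id and the trigger are illustrative assumptions; the functions and signatures are the ones defined above):

    // Hypothetical: after restoring a saved payload for a node, replay it to
    // the owning plugin with an echo update -- nobody waits for the result.
    const DICTIONARY_ITEM *item =
        dictionary_get_and_acquire_item(dyncfg_globals.nodes, "unittest:sync:single1");
    if(item) {
        DYNCFG *df = dictionary_acquired_item_value(item);
        if(df->payload && (df->cmds & DYNCFG_CMD_UPDATE))
            dyncfg_echo_update(item, df, "unittest:sync:single1");
        dictionary_acquired_item_release(dyncfg_globals.nodes, item);
    }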


@@ -0,0 +1,223 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg-internals.h"
#include "dyncfg.h"
void dyncfg_file_delete(const char *id) {
CLEAN_CHAR_P *escaped_id = dyncfg_escape_id_for_filename(id);
char filename[FILENAME_MAX];
snprintfz(filename, sizeof(filename), "%s/%s.dyncfg", dyncfg_globals.dir, escaped_id);
unlink(filename);
}
void dyncfg_file_save(const char *id, DYNCFG *df) {
CLEAN_CHAR_P *escaped_id = dyncfg_escape_id_for_filename(id);
char filename[FILENAME_MAX];
snprintfz(filename, sizeof(filename), "%s/%s.dyncfg", dyncfg_globals.dir, escaped_id);
FILE *fp = fopen(filename, "w");
if(!fp) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot create file '%s'", filename);
return;
}
df->modified_ut = now_realtime_usec();
fprintf(fp, "version=%zu\n", DYNCFG_VERSION);
fprintf(fp, "id=%s\n", id);
if(df->template)
fprintf(fp, "template=%s\n", string2str(df->template));
char uuid_str[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(df->host_uuid, uuid_str);
fprintf(fp, "host=%s\n", uuid_str);
fprintf(fp, "path=%s\n", string2str(df->path));
fprintf(fp, "type=%s\n", dyncfg_id2type(df->type));
fprintf(fp, "source_type=%s\n", dyncfg_id2source_type(df->source_type));
fprintf(fp, "source=%s\n", string2str(df->source));
fprintf(fp, "created=%"PRIu64"\n", df->created_ut);
fprintf(fp, "modified=%"PRIu64"\n", df->modified_ut);
fprintf(fp, "sync=%s\n", df->sync ? "true" : "false");
fprintf(fp, "user_disabled=%s\n", df->user_disabled ? "true" : "false");
fprintf(fp, "saves=%"PRIu32"\n", ++df->saves);
fprintf(fp, "cmds=");
dyncfg_cmds2fp(df->cmds, fp);
fprintf(fp, "\n");
if(df->payload && buffer_strlen(df->payload) > 0) {
fprintf(fp, "content_type=%s\n", content_type_id2string(df->payload->content_type));
fprintf(fp, "content_length=%zu\n", buffer_strlen(df->payload));
fprintf(fp, "---\n");
fwrite(buffer_tostring(df->payload), 1, buffer_strlen(df->payload), fp);
}
fclose(fp);
}
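// Illustrative on-disk layout produced by dyncfg_file_save() above (all
// values are hypothetical; the key order follows the fprintf() calls):
//
//   version=1
//   id=unittest:sync:template1:dyn1
//   template=unittest:sync:template1
//   host=0123456789abcdef0123456789abcdef
//   path=/unittests
//   type=job
//   source_type=dyncfg
//   source=unittest
//   created=1700000000000000
//   modified=1700000000000000
//   sync=true
//   user_disabled=false
//   saves=1
//   cmds=schema get update enable disable remove
//   content_type=application/json
//   content_length=30
//   ---
//   {"double":3.14,"boolean":true}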
void dyncfg_file_load(const char *filename) {
FILE *fp = fopen(filename, "r");
if (!fp) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot open file '%s'", filename);
return;
}
DYNCFG tmp = {
.host = NULL,
.status = DYNCFG_STATUS_ORPHAN,
};
char line[PLUGINSD_LINE_MAX];
CLEAN_CHAR_P *id = NULL;
HTTP_CONTENT_TYPE content_type = CT_NONE;
size_t content_length = 0;
bool read_payload = false;
while (fgets(line, sizeof(line), fp)) {
if(strcmp(line, "---\n") == 0) {
read_payload = true;
break;
}
char *value = strchr(line, '=');
if(!value) continue;
*value++ = '\0';
value = trim(value);
if(!value) continue;
char *key = trim(line);
if(!key) continue;
// Parse key-value pairs
if (strcmp(key, "version") == 0) {
size_t version = strtoull(value, NULL, 10);
if(version > DYNCFG_VERSION)
nd_log(NDLS_DAEMON, NDLP_NOTICE,
"DYNCFG: configuration file '%s' has version %zu, which is newer than our version %zu",
filename, version, DYNCFG_VERSION);
} else if (strcmp(key, "id") == 0) {
freez(id);
id = strdupz(value);
} else if (strcmp(key, "template") == 0) {
tmp.template = string_strdupz(value);
} else if (strcmp(key, "host") == 0) {
uuid_parse_flexi(value, tmp.host_uuid);
} else if (strcmp(key, "path") == 0) {
tmp.path = string_strdupz(value);
} else if (strcmp(key, "type") == 0) {
tmp.type = dyncfg_type2id(value);
} else if (strcmp(key, "source_type") == 0) {
tmp.source_type = dyncfg_source_type2id(value);
} else if (strcmp(key, "source") == 0) {
tmp.source = string_strdupz(value);
} else if (strcmp(key, "created") == 0) {
tmp.created_ut = strtoull(value, NULL, 10);
} else if (strcmp(key, "modified") == 0) {
tmp.modified_ut = strtoull(value, NULL, 10);
} else if (strcmp(key, "sync") == 0) {
tmp.sync = (strcmp(value, "true") == 0);
} else if (strcmp(key, "user_disabled") == 0) {
tmp.user_disabled = (strcmp(value, "true") == 0);
} else if (strcmp(key, "saves") == 0) {
tmp.saves = strtoull(value, NULL, 10);
} else if (strcmp(key, "content_type") == 0) {
content_type = content_type_string2id(value);
} else if (strcmp(key, "content_length") == 0) {
content_length = strtoull(value, NULL, 10);
} else if (strcmp(key, "cmds") == 0) {
tmp.cmds = dyncfg_cmds2id(value);
}
}
if(read_payload && content_length) {
tmp.payload = buffer_create(content_length, NULL);
tmp.payload->content_type = content_type;
buffer_need_bytes(tmp.payload, content_length);
tmp.payload->len = fread(tmp.payload->buffer, 1, content_length, fp);
}
fclose(fp);
if(!id) {
nd_log(NDLS_DAEMON, NDLP_ERR,
"DYNCFG: configuration file '%s' does not include a unique id. Ignoring it.",
filename);
dyncfg_cleanup(&tmp);
return;
}
dictionary_set(dyncfg_globals.nodes, id, &tmp, sizeof(tmp));
}
void dyncfg_load_all(void) {
DIR *dir = opendir(dyncfg_globals.dir);
if (!dir) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot open directory '%s'", dyncfg_globals.dir);
return;
}
struct dirent *entry;
char filepath[PATH_MAX];
while ((entry = readdir(dir)) != NULL) {
if ((entry->d_type == DT_REG || entry->d_type == DT_LNK) && strendswith(entry->d_name, ".dyncfg")) {
snprintf(filepath, sizeof(filepath), "%s/%s", dyncfg_globals.dir, entry->d_name);
dyncfg_file_load(filepath);
}
}
closedir(dir);
}
// ----------------------------------------------------------------------------
// schemas loading
static bool dyncfg_read_file_to_buffer(const char *filename, BUFFER *dst) {
int fd = open(filename, O_RDONLY, 0666);
if(unlikely(fd == -1))
return false;
struct stat st = { 0 };
if(fstat(fd, &st) != 0) {
close(fd);
return false;
}
buffer_flush(dst);
buffer_need_bytes(dst, st.st_size + 1); // +1 for the terminating zero
ssize_t r = read(fd, (char*)dst->buffer, st.st_size);
if(unlikely(r == -1)) {
close(fd);
return false;
}
dst->len = r;
dst->buffer[dst->len] = '\0';
close(fd);
return true;
}
bool dyncfg_get_schema(const char *id, BUFFER *dst) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, sizeof(filename), "%s/schema.d/%s.json", netdata_configured_user_config_dir, id);
if(dyncfg_read_file_to_buffer(filename, dst))
return true;
snprintfz(filename, sizeof(filename), "%s/schema.d/%s.json", netdata_configured_stock_config_dir, id);
if(dyncfg_read_file_to_buffer(filename, dst))
return true;
return false;
}
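
So a schema request for id "unittest:sync:template1" (an illustrative id) is served from "<user config dir>/schema.d/unittest:sync:template1.json" when present, falling back to "<stock config dir>/schema.d/unittest:sync:template1.json", matching the two snprintfz() calls above.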


@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg.h"
static DICTIONARY *dyncfg_nodes = NULL;
static int dyncfg_inline_callback(struct rrd_function_execute *rfe, void *data) {
char tr[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*rfe->transaction, tr);
bool cancelled = rfe->is_cancelled.cb ? rfe->is_cancelled.cb(rfe->is_cancelled.data) : false;
int code;
if(cancelled)
code = HTTP_RESP_CLIENT_CLOSED_REQUEST;
else
code = dyncfg_node_find_and_call(dyncfg_nodes, tr, rfe->function, rfe->stop_monotonic_ut, &cancelled, rfe->payload, rfe->source, rfe->result.wb);
if(code == HTTP_RESP_CLIENT_CLOSED_REQUEST || (rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data))) {
buffer_flush(rfe->result.wb);
code = HTTP_RESP_CLIENT_CLOSED_REQUEST;
}
if(rfe->result.cb)
rfe->result.cb(rfe->result.wb, code, rfe->result.data);
return code;
}
bool dyncfg_add(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, dyncfg_cb_t cb, void *data) {
if(dyncfg_add_low_level(host, id, path, status, type, source_type, source, cmds,
0, 0, true,
dyncfg_inline_callback, NULL)) {
struct dyncfg_node tmp = {
.cmds = cmds,
.type = type,
.cb = cb,
.data = data,
};
dictionary_set(dyncfg_nodes, id, &tmp, sizeof(tmp));
return true;
}
return false;
}
void dyncfg_del(RRDHOST *host, const char *id) {
dictionary_del(dyncfg_nodes, id);
dyncfg_del_low_level(host, id);
}
void dyncfg_status(RRDHOST *host, const char *id, DYNCFG_STATUS status) {
dyncfg_status_low_level(host, id, status);
}
void dyncfg_init(bool load_saved) {
dyncfg_nodes = dyncfg_nodes_dictionary_create();
dyncfg_init_low_level(load_saved);
}
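
A hedged sketch of registering a node through this inline API (the module name, callback, and data pointer are hypothetical; dyncfg_add() and the flags are as declared above):

    // Hypothetical internal module registering a single configuration node.
    // my_module_cb must match dyncfg_cb_t; my_module_data is opaque user data.
    dyncfg_add(localhost, "mymodule:settings", "/mymodule",
               DYNCFG_STATUS_RUNNING, DYNCFG_TYPE_SINGLE,
               DYNCFG_SOURCE_TYPE_INTERNAL, "internal",
               DYNCFG_CMD_SCHEMA | DYNCFG_CMD_GET | DYNCFG_CMD_UPDATE |
               DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
               my_module_cb, my_module_data);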


@@ -0,0 +1,351 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg-internals.h"
#include "dyncfg.h"
// ----------------------------------------------------------------------------
// we intercept the config function calls of the plugin
struct dyncfg_call {
BUFFER *payload;
char *function;
char *id;
char *add_name;
char *source;
DYNCFG_CMDS cmd;
rrd_function_result_callback_t result_cb;
void *result_cb_data;
bool from_dyncfg_echo;
};
DYNCFG_STATUS dyncfg_status_from_successful_response(int code) {
DYNCFG_STATUS status = DYNCFG_STATUS_ACCEPTED; // default, so an unexpected code cannot leave it uninitialized
if(code == DYNCFG_RESP_RUNNING)
status = DYNCFG_STATUS_RUNNING;
else if(code == DYNCFG_RESP_ACCEPTED || code == DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED)
status = DYNCFG_STATUS_ACCEPTED;
return status;
}
void dyncfg_function_intercept_result_cb(BUFFER *wb, int code, void *result_cb_data) {
struct dyncfg_call *dc = result_cb_data;
bool called_from_dyncfg_echo = dc->from_dyncfg_echo;
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item_advanced(dyncfg_globals.nodes, dc->id, -1);
if(item) {
DYNCFG *df = dictionary_acquired_item_value(item);
bool old_user_disabled = df->user_disabled;
bool save_required = false;
if (!called_from_dyncfg_echo) {
// the command was sent by a user
if (DYNCFG_RESP_SUCCESS(code)) {
if (dc->cmd == DYNCFG_CMD_ADD) {
char id[strlen(dc->id) + 1 + strlen(dc->add_name) + 1];
snprintfz(id, sizeof(id), "%s:%s", dc->id, dc->add_name);
const DICTIONARY_ITEM *new_item = dyncfg_add_internal(
df->host,
id,
string2str(df->path),
dyncfg_status_from_successful_response(code),
DYNCFG_TYPE_JOB,
DYNCFG_SOURCE_TYPE_DYNCFG,
dc->source,
(df->cmds & ~DYNCFG_CMD_ADD) | DYNCFG_CMD_GET | DYNCFG_CMD_UPDATE | DYNCFG_CMD_TEST | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_REMOVE,
0,
0,
df->sync,
df->execute_cb, df->execute_cb_data, false);
DYNCFG *new_df = dictionary_acquired_item_value(new_item);
SWAP(new_df->payload, dc->payload);
if(code == DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED)
new_df->restart_required = true;
dyncfg_file_save(id, new_df);
dictionary_acquired_item_release(dyncfg_globals.nodes, new_item);
} else if (dc->cmd == DYNCFG_CMD_UPDATE) {
df->source_type = DYNCFG_SOURCE_TYPE_DYNCFG;
string_freez(df->source);
df->source = string_strdupz(dc->source);
df->status = dyncfg_status_from_successful_response(code);
SWAP(df->payload, dc->payload);
save_required = true;
} else if (dc->cmd == DYNCFG_CMD_ENABLE) {
df->user_disabled = false;
} else if (dc->cmd == DYNCFG_CMD_DISABLE) {
df->user_disabled = true;
} else if (dc->cmd == DYNCFG_CMD_REMOVE) {
dyncfg_file_delete(dc->id);
}
if(dc->cmd != DYNCFG_CMD_ADD && code == DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED)
df->restart_required = true;
}
else
nd_log(NDLS_DAEMON, NDLP_ERR,
"DYNCFG: plugin returned code %d to user initiated call: %s", code, dc->function);
}
else {
// the command was sent by dyncfg
if(DYNCFG_RESP_SUCCESS(code)) {
if(dc->cmd == DYNCFG_CMD_ADD) {
char id[strlen(dc->id) + 1 + strlen(dc->add_name) + 1];
snprintfz(id, sizeof(id), "%s:%s", dc->id, dc->add_name);
const DICTIONARY_ITEM *new_item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id);
if(new_item) {
DYNCFG *new_df = dictionary_acquired_item_value(new_item);
new_df->status = dyncfg_status_from_successful_response(code);
if(code == DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED)
new_df->restart_required = true;
dictionary_acquired_item_release(dyncfg_globals.nodes, new_item);
}
}
else if(dc->cmd == DYNCFG_CMD_UPDATE) {
df->status = dyncfg_status_from_successful_response(code);
df->plugin_rejected = false;
}
else if(dc->cmd == DYNCFG_CMD_DISABLE)
df->status = DYNCFG_STATUS_DISABLED;
else if(dc->cmd == DYNCFG_CMD_ENABLE)
df->status = dyncfg_status_from_successful_response(code);
if(dc->cmd != DYNCFG_CMD_ADD && code == DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED)
df->restart_required = true;
}
else {
nd_log(NDLS_DAEMON, NDLP_ERR,
"DYNCFG: plugin returned code %d to dyncfg initiated call: %s", code, dc->function);
if(dc->cmd & (DYNCFG_CMD_UPDATE | DYNCFG_CMD_ADD))
df->plugin_rejected = true;
}
}
if (save_required || old_user_disabled != df->user_disabled)
dyncfg_file_save(dc->id, df);
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
}
if(dc->result_cb)
dc->result_cb(wb, code, dc->result_cb_data);
buffer_free(dc->payload);
freez(dc->function);
freez(dc->id);
freez(dc->source);
freez(dc->add_name);
freez(dc);
}
// ----------------------------------------------------------------------------
static void dyncfg_apply_action_on_all_template_jobs(const char *template_id, DYNCFG_CMDS c) {
STRING *template = string_strdupz(template_id);
DYNCFG *df;
dfe_start_reentrant(dyncfg_globals.nodes, df) {
if(df->template == template && df->type == DYNCFG_TYPE_JOB) {
DYNCFG_STATUS cmd_to_send_to_plugin = c;
if(c == DYNCFG_CMD_ENABLE)
cmd_to_send_to_plugin = df->user_disabled ? DYNCFG_CMD_DISABLE : DYNCFG_CMD_ENABLE;
else if(c == DYNCFG_CMD_DISABLE)
cmd_to_send_to_plugin = DYNCFG_CMD_DISABLE;
dyncfg_echo(df_dfe.item, df, df_dfe.name, cmd_to_send_to_plugin);
}
}
dfe_done(df);
string_freez(template);
}
// ----------------------------------------------------------------------------
// the callback for all config functions
int dyncfg_function_intercept_cb(struct rrd_function_execute *rfe, void *data __maybe_unused) {
// IMPORTANT: this function MUST call the result_cb even on failures
bool called_from_dyncfg_echo = rrd_function_has_this_original_result_callback(rfe->transaction, dyncfg_echo_cb);
DYNCFG_CMDS c = DYNCFG_CMD_NONE;
const DICTIONARY_ITEM *item = NULL;
const char *add_name = NULL;
size_t add_name_len = 0;
if(strncmp(rfe->function, PLUGINSD_FUNCTION_CONFIG " ", sizeof(PLUGINSD_FUNCTION_CONFIG)) == 0) {
const char *id = &rfe->function[sizeof(PLUGINSD_FUNCTION_CONFIG)];
while(isspace(*id)) id++;
const char *space = id;
while(*space && !isspace(*space)) space++;
size_t id_len = space - id;
const char *cmd = space;
while(isspace(*cmd)) cmd++;
space = cmd;
while(*space && !isspace(*space)) space++;
size_t cmd_len = space - cmd;
char cmd_copy[cmd_len + 1];
strncpyz(cmd_copy, cmd, cmd_len);
c = dyncfg_cmds2id(cmd_copy);
if(c == DYNCFG_CMD_ADD) {
add_name = space;
while(isspace(*add_name)) add_name++;
space = add_name;
while(*space && !isspace(*space)) space++;
add_name_len = space - add_name;
}
item = dictionary_get_and_acquire_item_advanced(dyncfg_globals.nodes, id, (ssize_t)id_len);
}
int rc = HTTP_RESP_INTERNAL_SERVER_ERROR;
if(!item) {
rc = HTTP_RESP_NOT_FOUND;
dyncfg_default_response(rfe->result.wb, rc, "dyncfg functions intercept: id is not found");
if(rfe->result.cb)
rfe->result.cb(rfe->result.wb, rc, rfe->result.data);
return HTTP_RESP_NOT_FOUND;
}
DYNCFG *df = dictionary_acquired_item_value(item);
const char *id = dictionary_acquired_item_name(item);
bool has_payload = rfe->payload && buffer_strlen(rfe->payload) ? true : false;
bool make_the_call_to_plugin = true;
if((c & (DYNCFG_CMD_GET | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_REMOVE | DYNCFG_CMD_RESTART)) && has_payload)
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: command has a payload, but it is not going to be used: %s", rfe->function);
if(c == DYNCFG_CMD_NONE) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: this command is unknown: %s", rfe->function);
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(rfe->result.wb, rc,
"dyncfg functions intercept: unknown command");
make_the_call_to_plugin = false;
}
else if(!(df->cmds & c)) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: this command is not supported by the configuration node: %s", rfe->function);
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(rfe->result.wb, rc,
"dyncfg functions intercept: this command is not supported by this configuration node");
make_the_call_to_plugin = false;
}
else if((c & (DYNCFG_CMD_ADD | DYNCFG_CMD_UPDATE | DYNCFG_CMD_TEST)) && !has_payload) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: command requires a payload, but no payload given: %s", rfe->function);
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(rfe->result.wb, rc,
"dyncfg functions intercept: payload is required");
make_the_call_to_plugin = false;
}
else if(c == DYNCFG_CMD_SCHEMA) {
bool loaded = false;
if(df->type == DYNCFG_TYPE_JOB) {
char template[strlen(id) + 1];
memcpy(template, id, sizeof(template));
char *colon = strrchr(template, ':');
if(colon) *colon = '\0';
if(template[0])
loaded = dyncfg_get_schema(template, rfe->result.wb);
}
else
loaded = dyncfg_get_schema(id, rfe->result.wb);
if(loaded) {
rfe->result.wb->content_type = CT_APPLICATION_JSON;
rfe->result.wb->expires = now_realtime_sec();
rc = HTTP_RESP_OK;
make_the_call_to_plugin = false;
}
}
else if(c & (DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_RESTART) && df->type == DYNCFG_TYPE_TEMPLATE) {
if(!called_from_dyncfg_echo) {
bool old_user_disabled = df->user_disabled;
if (c == DYNCFG_CMD_ENABLE)
df->user_disabled = false;
else if (c == DYNCFG_CMD_DISABLE)
df->user_disabled = true;
if (df->user_disabled != old_user_disabled)
dyncfg_file_save(id, df);
}
dyncfg_apply_action_on_all_template_jobs(id, c);
rc = HTTP_RESP_OK;
dyncfg_default_response(rfe->result.wb, rc, "applied");
make_the_call_to_plugin = false;
}
else if(c == DYNCFG_CMD_ADD) {
if (df->type != DYNCFG_TYPE_TEMPLATE) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: add command can only be applied on templates, not %s: %s",
dyncfg_id2type(df->type), rfe->function);
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(rfe->result.wb, rc,
"dyncfg functions intercept: add command is only allowed in templates");
make_the_call_to_plugin = false;
}
else if (!add_name || !*add_name || !add_name_len) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: add command does not specify a name: %s", rfe->function);
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(rfe->result.wb, rc,
"dyncfg functions intercept: command add requires a name, which is missing");
make_the_call_to_plugin = false;
}
}
else if(c == DYNCFG_CMD_ENABLE && df->type == DYNCFG_TYPE_JOB && dyncfg_is_user_disabled(string2str(df->template))) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot enable a job of a disabled template: %s", rfe->function);
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(rfe->result.wb, rc,
"dyncfg functions intercept: this job belongs to disabled template");
make_the_call_to_plugin = false;
}
if(make_the_call_to_plugin) {
struct dyncfg_call *dc = callocz(1, sizeof(*dc));
dc->function = strdupz(rfe->function);
dc->id = strdupz(id);
dc->source = rfe->source ? strdupz(rfe->source) : NULL;
dc->add_name = (c == DYNCFG_CMD_ADD) ? strndupz(add_name, add_name_len) : NULL;
dc->cmd = c;
dc->result_cb = rfe->result.cb;
dc->result_cb_data = rfe->result.data;
dc->payload = buffer_dup(rfe->payload);
dc->from_dyncfg_echo = called_from_dyncfg_echo;
rfe->result.cb = dyncfg_function_intercept_result_cb;
rfe->result.data = dc;
rc = df->execute_cb(rfe, df->execute_cb_data);
}
else if(rfe->result.cb)
rfe->result.cb(rfe->result.wb, rc, rfe->result.data);
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
return rc;
}
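
The function strings this interceptor parses have the form "config <id> <command> [name]" (assuming PLUGINSD_FUNCTION_CONFIG expands to the literal "config"); the unit tests further below issue exactly such calls, for example:

    config unittest:sync:template1 add dyn1        (payload carries the new job)
    config unittest:sync:single1 disable           (no payload)
    config unittest:sync:template1:dyn1 update     (payload carries the new config)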


@@ -0,0 +1,65 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_DYNCFG_INTERNALS_H
#define NETDATA_DYNCFG_INTERNALS_H
#include "../common.h"
#include "../../database/rrd.h"
#include "../../database/rrdfunctions.h"
#include "../../database/rrdfunctions-internals.h"
#include "../../database/rrdcollector-internals.h"
typedef struct dyncfg {
RRDHOST *host;
uuid_t host_uuid;
STRING *function;
STRING *template;
STRING *path;
DYNCFG_STATUS status;
DYNCFG_TYPE type;
DYNCFG_CMDS cmds;
DYNCFG_SOURCE_TYPE source_type;
STRING *source;
usec_t created_ut;
usec_t modified_ut;
uint32_t saves;
bool sync;
bool user_disabled;
bool plugin_rejected;
bool restart_required;
BUFFER *payload;
rrd_function_execute_cb_t execute_cb;
void *execute_cb_data;
// constructor data
bool overwrite_cb;
} DYNCFG;
struct dyncfg_globals {
const char *dir;
DICTIONARY *nodes;
};
extern struct dyncfg_globals dyncfg_globals;
void dyncfg_load_all(void);
void dyncfg_file_load(const char *filename);
void dyncfg_file_save(const char *id, DYNCFG *df);
void dyncfg_file_delete(const char *id);
bool dyncfg_get_schema(const char *id, BUFFER *dst);
void dyncfg_echo_cb(BUFFER *wb, int code, void *result_cb_data);
void dyncfg_echo(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id, DYNCFG_CMDS cmd);
void dyncfg_echo_update(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id);
void dyncfg_echo_add(const DICTIONARY_ITEM *template_item, DYNCFG *template_df, const char *template_id, const char *job_name);
const DICTIONARY_ITEM *dyncfg_add_internal(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, usec_t created_ut, usec_t modified_ut, bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data, bool overwrite_cb);
int dyncfg_function_intercept_cb(struct rrd_function_execute *rfe, void *data);
void dyncfg_cleanup(DYNCFG *v);
bool dyncfg_is_user_disabled(const char *id);
#endif //NETDATA_DYNCFG_INTERNALS_H

daemon/config/dyncfg-tree.c (new file)

@@ -0,0 +1,202 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg-internals.h"
#include "dyncfg.h"
static int dyncfg_tree_compar(const void *a, const void *b) {
const DICTIONARY_ITEM *item1 = *(const DICTIONARY_ITEM **)a;
const DICTIONARY_ITEM *item2 = *(const DICTIONARY_ITEM **)b;
DYNCFG *df1 = dictionary_acquired_item_value(item1);
DYNCFG *df2 = dictionary_acquired_item_value(item2);
int rc = string_cmp(df1->path, df2->path);
if(rc == 0)
rc = strcmp(dictionary_acquired_item_name(item1), dictionary_acquired_item_name(item2));
return rc;
}
static void dyncfg_to_json(DYNCFG *df, const char *id, BUFFER *wb) {
buffer_json_member_add_object(wb, id);
{
buffer_json_member_add_string(wb, "type", dyncfg_id2type(df->type));
buffer_json_member_add_string(wb, "status", dyncfg_id2status(df->status));
dyncfg_cmds2json_array(df->cmds, "cmds", wb);
buffer_json_member_add_string(wb, "source_type", dyncfg_id2source_type(df->source_type));
buffer_json_member_add_string(wb, "source", string2str(df->source));
buffer_json_member_add_boolean(wb, "sync", df->sync);
buffer_json_member_add_boolean(wb, "user_disabled", df->user_disabled);
buffer_json_member_add_boolean(wb, "restart_required", df->restart_required);
buffer_json_member_add_boolean(wb, "plugin_rejected", df->restart_required);
buffer_json_member_add_object(wb, "payload");
{
if (df->payload && buffer_strlen(df->payload)) {
buffer_json_member_add_boolean(wb, "available", true);
buffer_json_member_add_string(wb, "content_type", content_type_id2string(df->payload->content_type));
buffer_json_member_add_uint64(wb, "content_length", df->payload->len);
} else
buffer_json_member_add_boolean(wb, "available", false);
}
buffer_json_object_close(wb); // payload
buffer_json_member_add_uint64(wb, "saves", df->saves);
buffer_json_member_add_uint64(wb, "created_ut", df->created_ut);
buffer_json_member_add_uint64(wb, "modified_ut", df->modified_ut);
}
buffer_json_object_close(wb);
}
static void dyncfg_tree_for_host(RRDHOST *host, BUFFER *wb, const char *parent, const char *id) {
size_t entries = dictionary_entries(dyncfg_globals.nodes);
size_t used = 0;
const DICTIONARY_ITEM *items[entries];
size_t restart_required = 0, plugin_rejected = 0, status_incomplete = 0, status_failed = 0;
size_t parent_len = strlen(parent);
DYNCFG *df;
dfe_start_read(dyncfg_globals.nodes, df) {
if(!df->host) {
if(uuid_memcmp(&df->host_uuid, &host->host_uuid) == 0)
df->host = host;
}
if(df->host != host || strncmp(string2str(df->path), parent, parent_len) != 0)
continue;
if(!rrd_function_available(host, string2str(df->function)))
df->status = DYNCFG_STATUS_ORPHAN;
items[used++] = dictionary_acquired_item_dup(dyncfg_globals.nodes, df_dfe.item);
}
dfe_done(df);
qsort(items, used, sizeof(const DICTIONARY_ITEM *), dyncfg_tree_compar);
buffer_flush(wb);
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
buffer_json_member_add_uint64(wb, "version", 1);
buffer_json_member_add_object(wb, "tree");
{
STRING *last_path = NULL;
for (size_t i = 0; i < used; i++) {
df = dictionary_acquired_item_value(items[i]);
if (df->path != last_path) {
last_path = df->path;
if (i)
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, string2str(last_path));
}
dyncfg_to_json(df, dictionary_acquired_item_name(items[i]), wb);
if(df->status != DYNCFG_STATUS_ORPHAN) {
if (df->restart_required)
restart_required++;
if (df->plugin_rejected)
plugin_rejected++;
if (df->status == DYNCFG_STATUS_FAILED)
status_failed++;
if (df->status == DYNCFG_STATUS_INCOMPLETE)
status_incomplete++;
}
}
if (used)
buffer_json_object_close(wb);
}
buffer_json_object_close(wb); // tree
buffer_json_member_add_object(wb, "attention");
{
buffer_json_member_add_boolean(wb, "degraded", restart_required + plugin_rejected + status_failed + status_incomplete > 0);
buffer_json_member_add_uint64(wb, "restart_required", restart_required);
buffer_json_member_add_uint64(wb, "plugin_rejected", plugin_rejected);
buffer_json_member_add_uint64(wb, "status_failed", status_failed);
buffer_json_member_add_uint64(wb, "status_incomplete", status_incomplete);
}
buffer_json_object_close(wb); // attention
buffer_json_agents_v2(wb, NULL, 0, false, false);
buffer_json_finalize(wb);
for(size_t i = 0; i < used ;i++)
dictionary_acquired_item_release(dyncfg_globals.nodes, items[i]);
}
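// Hedged sketch of the response this function builds (field names follow the
// buffer_json_* calls above; the id and values are illustrative, and the
// agents section appended by buffer_json_agents_v2() is omitted):
//
//   {
//     "version": 1,
//     "tree": {
//       "/unittests": {
//         "unittest:sync:single1": {
//           "type": "single", "status": "running",
//           "cmds": [ "schema", "get", "update", "enable", "disable" ],
//           "source_type": "internal", "source": "...",
//           "sync": true, "user_disabled": false,
//           "restart_required": false, "plugin_rejected": false,
//           "payload": { "available": false },
//           "saves": 0, "created_ut": 0, "modified_ut": 0
//         }
//       }
//     },
//     "attention": {
//       "degraded": false, "restart_required": 0, "plugin_rejected": 0,
//       "status_failed": 0, "status_incomplete": 0
//     }
//   }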
static int dyncfg_config_execute_cb(struct rrd_function_execute *rfe, void *data) {
RRDHOST *host = data;
int code;
char buf[strlen(rfe->function) + 1];
memcpy(buf, rfe->function, sizeof(buf));
char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line
size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS);
const char *config = get_word(words, num_words, 0);
const char *action = get_word(words, num_words, 1);
const char *path = get_word(words, num_words, 2);
const char *id = get_word(words, num_words, 3);
if(!config || !*config || strcmp(config, PLUGINSD_FUNCTION_CONFIG) != 0) {
char *msg = "invalid function call, expected: config";
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG TREE: function call '%s': %s", rfe->function, msg);
code = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(!action || !*action) {
char *msg = "invalid function call, expected: config tree";
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG TREE: function call '%s': %s", rfe->function, msg);
code = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(strcmp(action, "tree") == 0) {
if(!path || !*path)
path = "/";
if(!id || !*id)
id = NULL;
else if(!dyncfg_is_valid_id(id)) {
char *msg = "invalid id given";
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG TREE: function call '%s': %s", rfe->function, msg);
code = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
code = HTTP_RESP_OK;
dyncfg_tree_for_host(host, rfe->result.wb, path, id);
}
else {
code = HTTP_RESP_NOT_FOUND;
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: unknown config id '%s' in call: '%s'. This can happen if the plugin that registered the dynamic configuration is not running now.", action, rfe->function);
rrd_call_function_error(rfe->result.wb, "unknown config id given", code);
}
cleanup:
if(rfe->result.cb)
rfe->result.cb(rfe->result.wb, code, rfe->result.data);
return code;
}
// ----------------------------------------------------------------------------
// this adds a 'config' function to all leaf nodes (localhost and virtual nodes)
// which is used to serve the tree and act as a catch-all for all config calls
// for which there is no id overloaded.
void dyncfg_host_init(RRDHOST *host) {
rrd_function_add(host, NULL, PLUGINSD_FUNCTION_CONFIG, 120,
1000, "Dynamic configuration", "config",
HTTP_ACCESS_ADMIN,
true, dyncfg_config_execute_cb, host);
}
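
With this catch-all registered, a call such as "config tree /unittests" (a hedged example; the path is illustrative) returns the JSON tree of every configuration node on the host whose path starts with /unittests.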


@@ -0,0 +1,792 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg-internals.h"
#include "dyncfg.h"
// ----------------------------------------------------------------------------
// unit test
#define LINE_FILE_STR TOSTRING(__LINE__) "@" __FILE__
struct dyncfg_unittest {
bool enabled;
size_t errors;
DICTIONARY *nodes;
SPINLOCK spinlock;
struct dyncfg_unittest_action *queue;
} dyncfg_unittest_data = { 0 };
typedef struct {
bool enabled;
bool removed;
struct {
double dbl;
bool bln;
} value;
} TEST_CFG;
typedef struct {
const char *id;
const char *source;
bool sync;
DYNCFG_TYPE type;
DYNCFG_CMDS cmds;
DYNCFG_SOURCE_TYPE source_type;
TEST_CFG current;
TEST_CFG expected;
bool received;
bool finished;
size_t last_saves;
bool needs_save;
} TEST;
struct dyncfg_unittest_action {
TEST *t;
BUFFER *result;
BUFFER *payload;
DYNCFG_CMDS cmd;
const char *add_name;
const char *source;
rrd_function_result_callback_t result_cb;
void *result_cb_data;
struct dyncfg_unittest_action *prev, *next;
};
static void dyncfg_unittest_register_error(const char *id, const char *msg) {
if(msg)
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: error on id '%s': %s", id ? id : "", msg);
__atomic_add_fetch(&dyncfg_unittest_data.errors, 1, __ATOMIC_RELAXED);
}
static int dyncfg_unittest_execute_cb(struct rrd_function_execute *rfe, void *data);
bool dyncfg_unittest_parse_payload(BUFFER *payload, TEST *t, DYNCFG_CMDS cmd, const char *add_name, const char *source) {
CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(payload));
if(!jobj) {
dyncfg_unittest_register_error(t->id, "cannot parse json payload");
return false;
}
struct json_object *json_double;
struct json_object *json_boolean;
json_object_object_get_ex(jobj, "double", &json_double);
double value_double = json_object_get_double(json_double);
json_object_object_get_ex(jobj, "boolean", &json_boolean);
int value_boolean = json_object_get_boolean(json_boolean);
if(cmd == DYNCFG_CMD_UPDATE) {
t->current.value.dbl = value_double;
t->current.value.bln = value_boolean;
}
else if(cmd == DYNCFG_CMD_ADD) {
char buf[strlen(t->id) + strlen(add_name) + 20];
snprintfz(buf, sizeof(buf), "%s:%s", t->id, add_name);
TEST tmp = {
.id = strdupz(buf),
.source = strdupz(source),
.cmds = (t->cmds & ~DYNCFG_CMD_ADD) | DYNCFG_CMD_GET | DYNCFG_CMD_REMOVE | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_TEST,
.sync = t->sync,
.type = DYNCFG_TYPE_JOB,
.source_type = DYNCFG_SOURCE_TYPE_DYNCFG,
.received = true,
.finished = true,
.current =
{.enabled = true,
.removed = false,
.value =
{
.dbl = value_double,
.bln = value_boolean,
}},
.expected = {
.enabled = true,
.removed = false,
.value = {
.dbl = 3.14,
.bln = true,
}
},
.needs_save = true,
};
const DICTIONARY_ITEM *item = dictionary_set_and_acquire_item(dyncfg_unittest_data.nodes, buf, &tmp, sizeof(tmp));
TEST *t2 = dictionary_acquired_item_value(item);
dictionary_acquired_item_release(dyncfg_unittest_data.nodes, item);
dyncfg_add_low_level(localhost, t2->id, "/unittests",
DYNCFG_STATUS_RUNNING, t2->type, t2->source_type, t2->source,
t2->cmds, 0, 0, t2->sync,
dyncfg_unittest_execute_cb, t2);
}
else {
dyncfg_unittest_register_error(t->id, "invalid command received to parse payload");
return false;
}
return true;
}
static int dyncfg_unittest_action(struct dyncfg_unittest_action *a) {
TEST *t = a->t;
int rc = HTTP_RESP_OK;
if(a->cmd == DYNCFG_CMD_ENABLE)
t->current.enabled = true;
else if(a->cmd == DYNCFG_CMD_DISABLE)
t->current.enabled = false;
else if(a->cmd == DYNCFG_CMD_ADD || a->cmd == DYNCFG_CMD_UPDATE)
rc = dyncfg_unittest_parse_payload(a->payload, a->t, a->cmd, a->add_name, a->source) ? HTTP_RESP_OK : HTTP_RESP_BAD_REQUEST;
else if(a->cmd == DYNCFG_CMD_REMOVE)
t->current.removed = true;
else
rc = HTTP_RESP_BAD_REQUEST;
dyncfg_default_response(a->result, rc, NULL);
a->result_cb(a->result, rc, a->result_cb_data);
buffer_free(a->payload);
freez((void *)a->add_name);
freez(a);
__atomic_store_n(&t->finished, true, __ATOMIC_RELAXED);
return rc;
}
static void *dyncfg_unittest_thread_action(void *ptr __maybe_unused) {
while(1) {
struct dyncfg_unittest_action *a = NULL;
spinlock_lock(&dyncfg_unittest_data.spinlock);
a = dyncfg_unittest_data.queue;
if(a)
DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(dyncfg_unittest_data.queue, a, prev, next);
spinlock_unlock(&dyncfg_unittest_data.spinlock);
if(a)
dyncfg_unittest_action(a);
else
sleep_usec(10 * USEC_PER_MS);
}
}
static int dyncfg_unittest_execute_cb(struct rrd_function_execute *rfe, void *data) {
int rc;
bool run_the_callback = true;
TEST *t = data;
t->received = true;
char buf[strlen(rfe->function) + 1];
memcpy(buf, rfe->function, sizeof(buf));
char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line
size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS);
const char *config = get_word(words, num_words, 0);
const char *id = get_word(words, num_words, 1);
const char *action = get_word(words, num_words, 2);
const char *add_name = get_word(words, num_words, 3);
if(!config || !*config || strcmp(config, PLUGINSD_FUNCTION_CONFIG) != 0) {
char *msg = "did not receive a config call";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(!id || !*id) {
char *msg = "did not receive an id";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(t->type != DYNCFG_TYPE_TEMPLATE && strcmp(t->id, id) != 0) {
char *msg = "id received is not the expected";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(!action || !*action) {
char *msg = "did not receive an action";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
DYNCFG_CMDS cmd = dyncfg_cmds2id(action);
if(cmd == DYNCFG_CMD_NONE) {
char *msg = "action received is not known";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(!(t->cmds & cmd)) {
char *msg = "received a command that is not supported";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
if(t->current.removed && cmd != DYNCFG_CMD_ADD) {
char *msg = "received a command for a removed entry";
dyncfg_unittest_register_error(id, msg);
rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg);
goto cleanup;
}
struct dyncfg_unittest_action *a = callocz(1, sizeof(*a));
a->t = t;
a->add_name = add_name ? strdupz(add_name) : NULL;
a->source = rfe->source;
a->result = rfe->result.wb;
a->payload = buffer_dup(rfe->payload);
a->cmd = cmd;
a->result_cb = rfe->result.cb;
a->result_cb_data = rfe->result.data;
run_the_callback = false;
if(t->sync)
rc = dyncfg_unittest_action(a);
else {
spinlock_lock(&dyncfg_unittest_data.spinlock);
DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(dyncfg_unittest_data.queue, a, prev, next);
spinlock_unlock(&dyncfg_unittest_data.spinlock);
rc = HTTP_RESP_OK;
}
cleanup:
if(run_the_callback) {
__atomic_store_n(&t->finished, true, __ATOMIC_RELAXED);
if (rfe->result.cb)
rfe->result.cb(rfe->result.wb, rc, rfe->result.data);
}
return rc;
}
static bool dyncfg_unittest_check(TEST *t, const char *cmd, bool received) {
size_t errors = 0;
fprintf(stderr, "CHECK '%s' after cmd '%s'...", t->id, cmd);
if(t->received != received) {
fprintf(stderr, "\n - received flag found '%s', expected '%s'",
t->received?"true":"false",
received?"true":"false");
errors++;
goto cleanup;
}
if(!received)
goto cleanup;
usec_t give_up_ut = now_monotonic_usec() + 2 * USEC_PER_SEC;
while(!__atomic_load_n(&t->finished, __ATOMIC_RELAXED)) {
static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };
nanosleep(&ns, NULL);
if(now_monotonic_usec() > give_up_ut) {
fprintf(stderr, "\n - gave up waiting for the plugin to process this!");
errors++;
goto cleanup;
}
}
if(t->type != DYNCFG_TYPE_TEMPLATE && t->current.enabled != t->expected.enabled) {
fprintf(stderr, "\n - enabled flag found '%s', expected '%s'",
t->current.enabled?"true":"false",
t->expected.enabled?"true":"false");
errors++;
}
if(t->current.removed != t->expected.removed) {
fprintf(stderr, "\n - removed flag found '%s', expected '%s'",
t->current.removed?"true":"false",
t->expected.removed?"true":"false");
errors++;
}
if(t->current.value.bln != t->expected.value.bln) {
fprintf(stderr, "\n - boolean value found '%s', expected '%s'",
t->current.value.bln?"true":"false",
t->expected.value.bln?"true":"false");
errors++;
}
if(t->current.value.dbl != t->expected.value.dbl) {
fprintf(stderr, "\n - double value found '%f', expected '%f'",
t->current.value.dbl, t->expected.value.dbl);
errors++;
}
DYNCFG *df = dictionary_get(dyncfg_globals.nodes, t->id);
if(!df) {
fprintf(stderr, "\n - not found in DYNCFG nodes dictionary!");
errors++;
}
else if(df->cmds != t->cmds) {
fprintf(stderr, "\n - has different cmds in DYNCFG nodes dictionary; found: ");
dyncfg_cmds2fp(df->cmds, stderr);
fprintf(stderr, ", expected: ");
dyncfg_cmds2fp(t->cmds, stderr);
fprintf(stderr, "\n");
errors++;
}
else if(df->type == DYNCFG_TYPE_JOB && df->source_type == DYNCFG_SOURCE_TYPE_DYNCFG && !df->saves) {
fprintf(stderr, "\n - DYNCFG job has no saves!");
errors++;
}
else if(df->type == DYNCFG_TYPE_JOB && df->source_type == DYNCFG_SOURCE_TYPE_DYNCFG && (!df->payload || !buffer_strlen(df->payload))) {
fprintf(stderr, "\n - DYNCFG job has no payload!");
errors++;
}
else if(df->user_disabled && !df->saves) {
fprintf(stderr, "\n - DYNCFG disabled config has no saves!");
errors++;
}
else if(t->source && string_strcmp(df->source, t->source) != 0) {
fprintf(stderr, "\n - source does not match!");
errors++;
}
else if(df->source && !t->source) {
fprintf(stderr, "\n - there is a source but it shouldn't be any!");
errors++;
}
else if(t->needs_save && df->saves <= t->last_saves) {
fprintf(stderr, "\n - should be saved, but it is not saved!");
errors++;
}
else if(!t->needs_save && df->saves > t->last_saves) {
fprintf(stderr, "\n - should be not be saved, but it saved!");
errors++;
}
cleanup:
if(errors) {
fprintf(stderr, "\n >>> FAILED\n\n");
dyncfg_unittest_register_error(NULL, NULL);
return false;
}
fprintf(stderr, " OK\n");
return true;
}
static void dyncfg_unittest_reset(void) {
TEST *t;
dfe_start_read(dyncfg_unittest_data.nodes, t) {
t->received = t->finished = false;
t->needs_save = false;
DYNCFG *df = dictionary_get(dyncfg_globals.nodes, t->id);
if(!df) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: cannot find id '%s'", t->id);
dyncfg_unittest_register_error(NULL, NULL);
}
else
t->last_saves = df->saves;
}
dfe_done(t);
}
void should_be_saved(TEST *t, DYNCFG_CMDS c) {
DYNCFG *df;
if(t->type == DYNCFG_TYPE_TEMPLATE) {
df = dictionary_get(dyncfg_globals.nodes, t->id);
t->current.enabled = !df->user_disabled;
}
t->needs_save =
c == DYNCFG_CMD_UPDATE ||
(t->current.enabled && c == DYNCFG_CMD_DISABLE) ||
(!t->current.enabled && c == DYNCFG_CMD_ENABLE);
}
static int dyncfg_unittest_run(const char *cmd, BUFFER *wb, const char *payload, const char *source) {
dyncfg_unittest_reset();
char buf[strlen(cmd) + 1];
memcpy(buf, cmd, sizeof(buf));
char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line
size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS);
// const char *config = get_word(words, num_words, 0);
const char *id = get_word(words, num_words, 1);
char *action = get_word(words, num_words, 2);
const char *add_name = get_word(words, num_words, 3);
DYNCFG_CMDS c = dyncfg_cmds2id(action);
TEST *t = dictionary_get(dyncfg_unittest_data.nodes, id);
if(!t) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: cannot find id '%s' from cmd: %s", id, cmd);
dyncfg_unittest_register_error(NULL, NULL);
return HTTP_RESP_NOT_FOUND;
}
if(t->type == DYNCFG_TYPE_TEMPLATE)
t->received = t->finished = true;
if(c == DYNCFG_CMD_DISABLE)
t->expected.enabled = false;
if(c == DYNCFG_CMD_ENABLE)
t->expected.enabled = true;
if(c == DYNCFG_CMD_UPDATE)
memset(&t->current.value, 0, sizeof(t->current.value));
buffer_flush(wb);
CLEAN_BUFFER *pld = NULL;
if(payload) {
pld = buffer_create(1024, NULL);
buffer_strcat(pld, payload);
}
should_be_saved(t, c);
int rc = rrd_function_run(localhost, wb, 10, HTTP_ACCESS_ADMIN, cmd,
true, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
pld, source);
if(!DYNCFG_RESP_SUCCESS(rc)) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: failed to run: %s; returned code %d", cmd, rc);
dyncfg_unittest_register_error(NULL, NULL);
}
dyncfg_unittest_check(t, cmd, true);
if(rc == HTTP_RESP_OK && t->type == DYNCFG_TYPE_TEMPLATE) {
if(c == DYNCFG_CMD_ADD) {
char buf2[strlen(id) + strlen(add_name) + 2];
snprintfz(buf2, sizeof(buf2), "%s:%s", id, add_name);
TEST *tt = dictionary_get(dyncfg_unittest_data.nodes, buf2);
if (!tt) {
nd_log(NDLS_DAEMON, NDLP_ERR,
"DYNCFG UNITTEST: failed to find newly added id '%s' of command: %s",
id, cmd);
dyncfg_unittest_register_error(NULL, NULL);
}
else
dyncfg_unittest_check(tt, cmd, true);
}
else {
STRING *template = string_strdupz(t->id);
DYNCFG *df;
dfe_start_read(dyncfg_globals.nodes, df) {
if(df->type == DYNCFG_TYPE_JOB && df->template == template) {
TEST *tt = dictionary_get(dyncfg_unittest_data.nodes, df_dfe.name);
if (!tt) {
nd_log(NDLS_DAEMON, NDLP_ERR,
"DYNCFG UNITTEST: failed to find id '%s' while running command: %s", df_dfe.name, cmd);
dyncfg_unittest_register_error(NULL, NULL);
}
else {
if(c == DYNCFG_CMD_DISABLE)
tt->expected.enabled = false;
if(c == DYNCFG_CMD_ENABLE)
tt->expected.enabled = true;
dyncfg_unittest_check(tt, cmd, true);
}
}
}
dfe_done(df);
string_freez(template);
}
}
return rc;
}
static void dyncfg_unittest_cleanup_files(void) {
char path[PATH_MAX];
snprintfz(path, sizeof(path), "%s/%s", netdata_configured_varlib_dir, "config");
DIR *dir = opendir(path);
if (!dir) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: cannot open directory '%s'", path);
return;
}
struct dirent *entry;
char filename[PATH_MAX];
while ((entry = readdir(dir)) != NULL) {
if ((entry->d_type == DT_REG || entry->d_type == DT_LNK) && strstartswith(entry->d_name, "unittest:") && strendswith(entry->d_name, ".dyncfg")) {
snprintf(filename, sizeof(filename), "%s/%s", path, entry->d_name);
nd_log(NDLS_DAEMON, NDLP_INFO, "DYNCFG UNITTEST: deleting file '%s'", filename);
unlink(filename);
}
}
closedir(dir);
}
static TEST *dyncfg_unittest_add(TEST t) {
dyncfg_unittest_reset();
TEST *ret = dictionary_set(dyncfg_unittest_data.nodes, t.id, &t, sizeof(t));
if(!dyncfg_add_low_level(localhost, t.id, "/unittests", DYNCFG_STATUS_RUNNING, t.type,
t.source_type, t.source,
t.cmds, 0, 0, t.sync, dyncfg_unittest_execute_cb, ret)) {
dyncfg_unittest_register_error(t.id, "addition of job failed");
}
dyncfg_unittest_check(ret, "plugin create", t.type != DYNCFG_TYPE_TEMPLATE);
return ret;
}
void dyncfg_unittest_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
TEST *v = value;
freez((void *)v->id);
freez((void *)v->source);
}
int dyncfg_unittest(void) {
dyncfg_unittest_data.nodes = dictionary_create(DICT_OPTION_NONE);
dictionary_register_delete_callback(dyncfg_unittest_data.nodes, dyncfg_unittest_delete_cb, NULL);
dyncfg_unittest_cleanup_files();
rrd_functions_inflight_init();
dyncfg_init(false);
// ------------------------------------------------------------------------
// create the thread for testing async communication
netdata_thread_t thread;
netdata_thread_create(&thread, "unittest", NETDATA_THREAD_OPTION_JOINABLE,
dyncfg_unittest_thread_action, NULL);
// ------------------------------------------------------------------------
// single
TEST *single1 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:sync:single1"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_SINGLE,
.cmds = DYNCFG_CMD_GET | DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_INTERNAL,
.sync = true,
.current = {
.enabled = true,
},
.expected = {
.enabled = true,
}
}); (void)single1;
TEST *single2 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:async:single2"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_SINGLE,
.cmds = DYNCFG_CMD_GET | DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_INTERNAL,
.sync = false,
.current = {
.enabled = true,
},
.expected = {
.enabled = true,
}
}); (void)single2;
// ------------------------------------------------------------------------
// template
TEST *template1 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:sync:template1"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_TEMPLATE,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_ADD | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_INTERNAL,
.sync = true,
}); (void)template1;
TEST *template2 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:async:template2"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_TEMPLATE,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_ADD | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_INTERNAL,
.sync = false,
}); (void)template2;
// ------------------------------------------------------------------------
// job
TEST *user1 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:sync:template1:user1"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_JOB,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_USER,
.sync = true,
.current = {
.enabled = true,
},
.expected = {
.enabled = true,
}
}); (void)user1;
TEST *user2 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:async:template2:user2"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_JOB,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_USER,
.sync = false,
.expected = {
.enabled = true,
}
}); (void)user2;
// ------------------------------------------------------------------------
int rc; (void)rc;
BUFFER *wb = buffer_create(0, NULL);
// ------------------------------------------------------------------------
// dynamic job
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 add dyn1", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 add dyn2", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 add dyn3", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 add dyn4", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR);
// ------------------------------------------------------------------------
// saving of user_disabled
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:single1 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:single2 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:user1 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:user2 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn1 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn2 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn3 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn4 disable", wb, NULL, LINE_FILE_STR);
// ------------------------------------------------------------------------
// enabling
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:single1 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:single2 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:user1 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:user2 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn1 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn2 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn3 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn4 enable", wb, NULL, LINE_FILE_STR);
// ------------------------------------------------------------------------
// disabling template
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 disable", wb, NULL, LINE_FILE_STR);
// ------------------------------------------------------------------------
// enabling template
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 enable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 enable", wb, NULL, LINE_FILE_STR);
// ------------------------------------------------------------------------
// adding job on disabled template
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 disable", wb, NULL, LINE_FILE_STR);
dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 disable", wb, NULL, LINE_FILE_STR);
TEST *user3 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:sync:template1:user3"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_JOB,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_USER,
.sync = true,
.expected = {
.enabled = false,
}
}); (void)user3;
TEST *user4 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:async:template2:user4"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_JOB,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_USER,
.sync = false,
.expected = {
.enabled = false,
}
}); (void)user4;
TEST *user5 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:sync:template1:user5"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_JOB,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_USER,
.sync = true,
.expected = {
.enabled = false,
}
}); (void)user5;
TEST *user6 = dyncfg_unittest_add((TEST){
.id = strdupz("unittest:async:template2:user6"),
.source = strdupz(LINE_FILE_STR),
.type = DYNCFG_TYPE_JOB,
.cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE,
.source_type = DYNCFG_SOURCE_TYPE_USER,
.sync = false,
.expected = {
.enabled = false,
}
}); (void)user6;
// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:user5 disable", wb, NULL, LINE_FILE_STR);
// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:user6 disable", wb, NULL, LINE_FILE_STR);
// // ------------------------------------------------------------------------
// // enable template with disabled jobs
//
// user3->expected.enabled = true;
// user5->expected.enabled = false;
// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 enable", wb, NULL, LINE_FILE_STR);
//
// user4->expected.enabled = true;
// user6->expected.enabled = false;
// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 enable", wb, NULL, LINE_FILE_STR);
// // ------------------------------------------------------------------------
//
// rc = dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " tree", wb, NULL);
// if(rc == HTTP_RESP_OK)
// fprintf(stderr, "%s\n", buffer_tostring(wb));
void *ptr;
netdata_thread_cancel(thread);
netdata_thread_join(thread, &ptr);
dyncfg_unittest_cleanup_files();
dictionary_destroy(dyncfg_unittest_data.nodes);
return __atomic_load_n(&dyncfg_unittest_data.errors, __ATOMIC_RELAXED) > 0 ? 1 : 0;
}

daemon/config/dyncfg.c (new file)

@@ -0,0 +1,405 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "dyncfg-internals.h"
#include "dyncfg.h"
struct dyncfg_globals dyncfg_globals = { 0 };
void dyncfg_cleanup(DYNCFG *v) {
buffer_free(v->payload);
v->payload = NULL;
string_freez(v->path);
v->path = NULL;
string_freez(v->source);
v->source = NULL;
string_freez(v->function);
v->function = NULL;
string_freez(v->template);
v->template = NULL;
}
static void dyncfg_normalize(DYNCFG *df) {
usec_t now_ut = now_realtime_usec();
if(!df->created_ut)
df->created_ut = now_ut;
if(!df->modified_ut)
df->modified_ut = now_ut;
}
static void dyncfg_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
DYNCFG *df = value;
dyncfg_cleanup(df);
}
static void dyncfg_insert_cb(const DICTIONARY_ITEM *item, void *value, void *data __maybe_unused) {
DYNCFG *df = value;
dyncfg_normalize(df);
const char *id = dictionary_acquired_item_name(item);
char buf[strlen(id) + 20];
snprintfz(buf, sizeof(buf), PLUGINSD_FUNCTION_CONFIG " %s", id);
df->function = string_strdupz(buf);
if(df->type == DYNCFG_TYPE_JOB && !df->template) {
const char *last_colon = strrchr(id, ':');
if(last_colon)
df->template = string_strndupz(id, last_colon - id);
else
nd_log(NDLS_DAEMON, NDLP_WARNING,
"DYNCFG: id '%s' is a job, but does not contain a colon to find the template", id);
}
}
static void dyncfg_react_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
DYNCFG *df = value; (void)df;
}
static bool dyncfg_conflict_cb(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) {
DYNCFG *v = old_value;
DYNCFG *nv = new_value;
size_t changes = 0;
dyncfg_normalize(nv);
if(v->host != nv->host) {
SWAP(v->host, nv->host);
changes++;
}
if(v->path != nv->path) {
SWAP(v->path, nv->path);
changes++;
}
if(v->status != nv->status) {
SWAP(v->status, nv->status);
changes++;
}
if(v->type != nv->type) {
SWAP(v->type, nv->type);
changes++;
}
if(v->source_type != nv->source_type) {
SWAP(v->source_type, nv->source_type);
changes++;
}
if(v->cmds != nv->cmds) {
SWAP(v->cmds, nv->cmds);
changes++;
}
if(v->source != nv->source) {
SWAP(v->source, nv->source);
changes++;
}
if(nv->created_ut < v->created_ut) {
SWAP(v->created_ut, nv->created_ut);
changes++;
}
if(nv->modified_ut > v->modified_ut) {
SWAP(v->modified_ut, nv->modified_ut);
changes++;
}
if(v->sync != nv->sync) {
SWAP(v->sync, nv->sync);
changes++;
}
if(nv->payload) {
SWAP(v->payload, nv->payload);
changes++;
}
if(!v->execute_cb || (nv->overwrite_cb && nv->execute_cb && (v->execute_cb != nv->execute_cb || v->execute_cb_data != nv->execute_cb_data))) {
v->execute_cb = nv->execute_cb;
v->execute_cb_data = nv->execute_cb_data;
changes++;
}
dyncfg_cleanup(nv);
return changes > 0;
}
// ----------------------------------------------------------------------------
void dyncfg_init_low_level(bool load_saved) {
if(!dyncfg_globals.nodes) {
dyncfg_globals.nodes = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(DYNCFG));
dictionary_register_insert_callback(dyncfg_globals.nodes, dyncfg_insert_cb, NULL);
dictionary_register_react_callback(dyncfg_globals.nodes, dyncfg_react_cb, NULL);
dictionary_register_conflict_callback(dyncfg_globals.nodes, dyncfg_conflict_cb, NULL);
dictionary_register_delete_callback(dyncfg_globals.nodes, dyncfg_delete_cb, NULL);
char path[PATH_MAX];
snprintfz(path, sizeof(path), "%s/%s", netdata_configured_varlib_dir, "config");
if(mkdir(path, 0755) == -1) {
if(errno != EEXIST)
nd_log(NDLS_DAEMON, NDLP_CRIT, "DYNCFG: failed to create dynamic configuration directory '%s'", path);
}
dyncfg_globals.dir = strdupz(path);
if(load_saved)
dyncfg_load_all();
}
}
// ----------------------------------------------------------------------------
const DICTIONARY_ITEM *dyncfg_add_internal(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, usec_t created_ut, usec_t modified_ut, bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data, bool overwrite_cb) {
DYNCFG tmp = {
.host = host,
.path = string_strdupz(path),
.status = status,
.type = type,
.cmds = cmds,
.source_type = source_type,
.source = string_strdupz(source),
.created_ut = created_ut,
.modified_ut = modified_ut,
.sync = sync,
.user_disabled = false,
.restart_required = false,
.payload = NULL,
.execute_cb = execute_cb,
.execute_cb_data = execute_cb_data,
.overwrite_cb = overwrite_cb,
};
uuid_copy(tmp.host_uuid, host->host_uuid);
return dictionary_set_and_acquire_item_advanced(dyncfg_globals.nodes, id, -1, &tmp, sizeof(tmp), NULL);
}
static void dyncfg_send_updates(const char *id) {
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item_advanced(dyncfg_globals.nodes, id, -1);
if(!item) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: asked to update plugin for configuration '%s', but it is not found.", id);
return;
}
DYNCFG *df = dictionary_acquired_item_value(item);
if(df->type == DYNCFG_TYPE_SINGLE || df->type == DYNCFG_TYPE_JOB) {
if (df->cmds & DYNCFG_CMD_UPDATE)
dyncfg_echo_update(item, df, id);
}
else if(df->type == DYNCFG_TYPE_TEMPLATE && (df->cmds & DYNCFG_CMD_ADD)) {
STRING *template = string_strdupz(id);
size_t len = strlen(id);
DYNCFG *tf;
dfe_start_reentrant(dyncfg_globals.nodes, tf) {
const char *t_id = tf_dfe.name;
if(tf->type == DYNCFG_TYPE_JOB && tf->template == template && strncmp(t_id, id, len) == 0 && t_id[len] == ':' && t_id[len + 1]) {
dyncfg_echo_add(item, df, id, &t_id[len + 1]);
}
}
dfe_done(tf);
string_freez(template);
}
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
}
bool dyncfg_is_user_disabled(const char *id) {
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id);
if(!item)
return false;
DYNCFG *df = dictionary_acquired_item_value(item);
bool ret = df->user_disabled;
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
return ret;
}
bool dyncfg_job_has_registered_template(const char *id) {
char buf[strlen(id) + 1];
memcpy(buf, id, sizeof(buf));
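// truncate at the last colon: a job id "template:job" leaves "template" in buf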
char *colon = strrchr(buf, ':');
if(!colon)
return false;
*colon = '\0';
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, buf);
if(!item)
return false;
DYNCFG *df = dictionary_acquired_item_value(item);
bool ret = df->type == DYNCFG_TYPE_TEMPLATE;
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
return ret;
}
bool dyncfg_add_low_level(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, usec_t created_ut, usec_t modified_ut, bool sync, rrd_function_execute_cb_t execute_cb, void *execute_cb_data) {
if(!dyncfg_is_valid_id(id)) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id);
return false;
}
if(type == DYNCFG_TYPE_JOB && !dyncfg_job_has_registered_template(id)) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: job id '%s' does not have a registered template. Ignoring dynamic configuration for it.", id);
return false;
}
DYNCFG_CMDS old_cmds = cmds;
// all configurations support schema
cmds |= DYNCFG_CMD_SCHEMA;
// if there is either enable or disable, both are supported
if(cmds & (DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE))
cmds |= DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE;
// add
if(type == DYNCFG_TYPE_TEMPLATE) {
// templates must always support "add"
cmds |= DYNCFG_CMD_ADD;
}
else {
// only templates can have "add"
cmds &= ~DYNCFG_CMD_ADD;
}
// remove
if(source_type == DYNCFG_SOURCE_TYPE_DYNCFG && type == DYNCFG_TYPE_JOB) {
// remove is only available for dyncfg jobs
cmds |= DYNCFG_CMD_REMOVE;
}
else {
// remove is only available for dyncfg jobs
cmds &= ~DYNCFG_CMD_REMOVE;
}
// data
if(type == DYNCFG_TYPE_TEMPLATE) {
// templates do not have data
cmds &= ~(DYNCFG_CMD_GET | DYNCFG_CMD_UPDATE | DYNCFG_CMD_TEST);
}
if(cmds != old_cmds) {
CLEAN_BUFFER *t = buffer_create(1024, NULL);
buffer_sprintf(t, "DYNCFG: id '%s' was declared with cmds: ", id);
dyncfg_cmds2buffer(old_cmds, t);
buffer_strcat(t, ", but they have sanitized to: ");
dyncfg_cmds2buffer(cmds, t);
nd_log(NDLS_DAEMON, NDLP_NOTICE, "%s", buffer_tostring(t));
}
const DICTIONARY_ITEM *item = dyncfg_add_internal(host, id, path, status, type, source_type, source, cmds, created_ut, modified_ut, sync, execute_cb, execute_cb_data, true);
DYNCFG *df = dictionary_acquired_item_value(item);
// if(df->source_type == DYNCFG_SOURCE_TYPE_DYNCFG && !df->saves)
// nd_log(NDLS_DAEMON, NDLP_WARNING, "DYNCFG: configuration '%s' is created with source type dyncfg, but we don't have a saved configuration for it", id);
rrd_collector_started();
rrd_function_add(
host,
NULL,
string2str(df->function),
120,
1000,
"Dynamic configuration",
"config",
HTTP_ACCESS_ADMIN,
sync,
dyncfg_function_intercept_cb,
NULL);
DYNCFG_CMDS cmd_to_send_to_plugin = df->user_disabled ? DYNCFG_CMD_DISABLE : DYNCFG_CMD_ENABLE;
if(cmd_to_send_to_plugin == DYNCFG_CMD_ENABLE && dyncfg_is_user_disabled(string2str(df->template)))
cmd_to_send_to_plugin = DYNCFG_CMD_DISABLE;
dyncfg_echo(item, df, id, cmd_to_send_to_plugin);
dyncfg_send_updates(id);
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
return true;
}
void dyncfg_del_low_level(RRDHOST *host, const char *id) {
if(!dyncfg_is_valid_id(id)) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id);
return;
}
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id);
if(item) {
DYNCFG *df = dictionary_acquired_item_value(item);
rrd_function_del(host, NULL, string2str(df->function));
bool garbage_collect = false;
if(df->saves == 0) {
dictionary_del(dyncfg_globals.nodes, id);
garbage_collect = true;
}
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
if(garbage_collect)
dictionary_garbage_collect(dyncfg_globals.nodes);
}
}
void dyncfg_status_low_level(RRDHOST *host __maybe_unused, const char *id, DYNCFG_STATUS status) {
if(!dyncfg_is_valid_id(id)) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id);
return;
}
if(status == DYNCFG_STATUS_NONE) {
nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: status provided to id '%s' is invalid. Ignoring it.", id);
return;
}
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id);
if(item) {
DYNCFG *df = dictionary_acquired_item_value(item);
df->status = status;
dictionary_acquired_item_release(dyncfg_globals.nodes, item);
}
}
// ----------------------------------------------------------------------------
void dyncfg_add_streaming(BUFFER *wb) {
// when sending config functions to parents, we send only 1 function called 'config';
// the parent will send the command to the child, and the child will validate it;
// this way the parent does not need to receive removals of config functions;
buffer_sprintf(wb
, PLUGINSD_KEYWORD_FUNCTION " GLOBAL " PLUGINSD_FUNCTION_CONFIG " %d \"%s\" \"%s\" \"%s\" %d\n"
, 120
, "Dynamic configuration"
, "config"
, http_id2access(HTTP_ACCESS_ADMIN)
, 1000
);
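// assuming PLUGINSD_FUNCTION_CONFIG is "config" and HTTP_ACCESS_ADMIN maps to "admin",
// this emits a line like: FUNCTION GLOBAL config 120 "Dynamic configuration" "config" "admin" 1000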
}
bool dyncfg_available_for_rrdhost(RRDHOST *host) {
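// localhost and virtual hosts always support dyncfg; for streamed children,
// availability depends on the child having exposed the global 'config' function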
if(host == localhost || rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST))
return true;
return rrd_function_available(host, PLUGINSD_FUNCTION_CONFIG);
}
// ----------------------------------------------------------------------------

daemon/config/dyncfg.h
@ -0,0 +1,31 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_DYNCFG_H
#define NETDATA_DYNCFG_H
#include "../common.h"
#include "../../database/rrd.h"
#include "../../database/rrdfunctions.h"
void dyncfg_add_streaming(BUFFER *wb);
bool dyncfg_available_for_rrdhost(RRDHOST *host);
void dyncfg_host_init(RRDHOST *host);
// low-level API used by plugins.d and high-level API
bool dyncfg_add_low_level(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type,
DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds,
usec_t created_ut, usec_t modified_ut, bool sync,
rrd_function_execute_cb_t execute_cb, void *execute_cb_data);
void dyncfg_del_low_level(RRDHOST *host, const char *id);
void dyncfg_status_low_level(RRDHOST *host, const char *id, DYNCFG_STATUS status);
void dyncfg_init_low_level(bool load_saved);
// high-level API for internal modules
bool dyncfg_add(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type,
DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, dyncfg_cb_t cb, void *data);
void dyncfg_del(RRDHOST *host, const char *id);
void dyncfg_status(RRDHOST *host, const char *id, DYNCFG_STATUS status);
void dyncfg_init(bool load_saved);
#endif //NETDATA_DYNCFG_H

@ -1404,6 +1404,24 @@ void bearer_tokens_init(void);
int unittest_rrdpush_compressions(void);
int uuid_unittest(void);
int progress_unittest(void);
int dyncfg_unittest(void);
int unittest_prepare_rrd(char **user) {
post_conf_load(user);
get_netdata_configured_variables();
default_rrd_update_every = 1;
default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
default_health_enabled = 0;
storage_tiers = 1;
registry_init();
if(rrd_init("unittest", NULL, true)) {
fprintf(stderr, "rrd_init failed for unittest\n");
return 1;
}
default_rrdpush_enabled = 0;
return 0;
}
int main(int argc, char **argv) {
// initialize the system clocks
@ -1523,49 +1541,28 @@ int main(int argc, char **argv) {
if(strcmp(optarg, "unittest") == 0) {
unittest_running = true;
if (pluginsd_parser_unittest())
return 1;
if (pluginsd_parser_unittest()) return 1;
if (unit_test_static_threads()) return 1;
if (unit_test_buffer()) return 1;
if (unit_test_str2ld()) return 1;
if (buffer_unittest()) return 1;
if (unit_test_bitmaps()) return 1;
if (unit_test_static_threads())
return 1;
if (unit_test_buffer())
return 1;
if (unit_test_str2ld())
return 1;
if (buffer_unittest())
return 1;
if (unit_test_bitmaps())
return 1;
// No call to load the config file on this code-path
post_conf_load(&user);
get_netdata_configured_variables();
default_rrd_update_every = 1;
default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
default_health_enabled = 0;
storage_tiers = 1;
registry_init();
if(rrd_init("unittest", NULL, true)) {
fprintf(stderr, "rrd_init failed for unittest\n");
return 1;
}
default_rrdpush_enabled = 0;
if(run_all_mockup_tests()) return 1;
if(unit_test_storage()) return 1;
if (unittest_prepare_rrd(&user)) return 1;
if (run_all_mockup_tests()) return 1;
if (unit_test_storage()) return 1;
#ifdef ENABLE_DBENGINE
if(test_dbengine()) return 1;
if (test_dbengine()) return 1;
#endif
if(test_sqlite()) return 1;
if(string_unittest(10000)) return 1;
if (dictionary_unittest(10000))
return 1;
if(aral_unittest(10000))
return 1;
if (rrdlabels_unittest())
return 1;
if (ctx_unittest())
return 1;
if (uuid_unittest())
return 1;
if (test_sqlite()) return 1;
if (string_unittest(10000)) return 1;
if (dictionary_unittest(10000)) return 1;
if (aral_unittest(10000)) return 1;
if (rrdlabels_unittest()) return 1;
if (ctx_unittest()) return 1;
if (uuid_unittest()) return 1;
if (dyncfg_unittest()) return 1;
fprintf(stderr, "\n\nALL TESTS PASSED\n\n");
return 0;
}
@ -1633,6 +1630,12 @@ int main(int argc, char **argv) {
unittest_running = true;
return progress_unittest();
}
else if(strcmp(optarg, "dyncfgtest") == 0) {
unittest_running = true;
if(unittest_prepare_rrd(&user))
return 1;
return dyncfg_unittest();
}
else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) {
optarg += strlen(createdataset_string);
unsigned history_seconds = strtoul(optarg, NULL, 0);
@ -2110,7 +2113,7 @@ int main(int argc, char **argv) {
setenv("HOME", netdata_configured_home_dir, 1);
dyn_conf_init();
dyncfg_init(true);
netdata_log_info("netdata started on pid %d.", getpid());

@ -195,15 +195,6 @@ const struct netdata_static_thread static_threads_common[] = {
.init_routine = NULL,
.start_routine = profile_main
},
{
.name = "DYNCFG",
.config_section = NULL,
.config_name = NULL,
.enabled = 1,
.thread = NULL,
.init_routine = NULL,
.start_routine = dyncfg_main
},
// terminator
{

@ -729,6 +729,15 @@ static void agent_capabilities_to_json(BUFFER *wb, RRDHOST *host, const char *ke
freez(capas);
}
static inline void host_dyncfg_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) {
buffer_json_member_add_object(wb, key);
{
buffer_json_member_add_string(wb, "status", rrdhost_dyncfg_status_to_string(s->dyncfg.status));
}
buffer_json_object_close(wb); // dyncfg
}
static inline void rrdhost_health_to_json_v2(BUFFER *wb, const char *key, RRDHOST_STATUS *s) {
buffer_json_member_add_object(wb, key);
{
@ -841,6 +850,8 @@ static void rrdcontext_to_json_v2_rrdhost(BUFFER *wb, RRDHOST *host, struct rrdc
host_functions2json(host, wb); // functions
agent_capabilities_to_json(wb, host, "capabilities");
host_dyncfg_to_json_v2(wb, "dyncfg", &s);
}
buffer_json_object_close(wb); // this instance
buffer_json_array_close(wb); // instances
@ -917,7 +928,7 @@ static ssize_t rrdcontext_to_json_v2_add_host(void *data, RRDHOST *host, bool qu
.node_ids = &ctl->nodes.ni,
.help = NULL,
.tags = NULL,
.access = HTTP_ACCESS_MEMBERS,
.access = HTTP_ACCESS_MEMBER,
.priority = RRDFUNCTIONS_PRIORITY_DEFAULT,
};
host_functions_to_dict(host, ctl->functions.dict, &t, sizeof(t), &t.help, &t.tags, &t.access, &t.priority);

@ -1358,8 +1358,6 @@ struct rrdhost {
netdata_mutex_t aclk_state_lock;
aclk_rrdhost_state aclk_state;
DICTIONARY *configurable_plugins; // configurable plugins for this host
struct rrdhost *next;
struct rrdhost *prev;
};

@ -190,11 +190,11 @@ const RRDCALC_ACQUIRED *rrdcalc_from_rrdset_get(RRDSET *st, const char *alert_na
char key[RRDCALC_MAX_KEY_SIZE + 1];
size_t key_len = rrdcalc_key(key, RRDCALC_MAX_KEY_SIZE, rrdset_id(st), alert_name);
const RRDCALC_ACQUIRED *rca = (const RRDCALC_ACQUIRED *)dictionary_get_and_acquire_item_advanced(st->rrdhost->rrdcalc_root_index, key, (ssize_t)(key_len + 1));
const RRDCALC_ACQUIRED *rca = (const RRDCALC_ACQUIRED *)dictionary_get_and_acquire_item_advanced(st->rrdhost->rrdcalc_root_index, key, (ssize_t)key_len);
if(!rca) {
key_len = rrdcalc_key(key, RRDCALC_MAX_KEY_SIZE, rrdset_name(st), alert_name);
rca = (const RRDCALC_ACQUIRED *)dictionary_get_and_acquire_item_advanced(st->rrdhost->rrdcalc_root_index, key, (ssize_t)(key_len + 1));
rca = (const RRDCALC_ACQUIRED *)dictionary_get_and_acquire_item_advanced(st->rrdhost->rrdcalc_root_index, key, (ssize_t)key_len);
}
return rca;
@ -727,7 +727,7 @@ void rrdcalc_add_from_rrdcalctemplate(RRDHOST *host, RRDCALCTEMPLATE *rt, RRDSET
.existing_from_template = false,
};
dictionary_set_advanced(host->rrdcalc_root_index, key, (ssize_t)(key_len + 1), NULL, sizeof(RRDCALC), &tmp);
dictionary_set_advanced(host->rrdcalc_root_index, key, (ssize_t)key_len, NULL, sizeof(RRDCALC), &tmp);
if(tmp.react_action != RRDCALC_REACT_NEW && tmp.existing_from_template == false)
netdata_log_error("RRDCALC: from template '%s' on chart '%s' with key '%s', failed to be added to host '%s'. It is manually configured.",
string2str(rt->name), rrdset_id(st), key, rrdhost_hostname(host));
@ -761,7 +761,7 @@ int rrdcalc_add_from_config(RRDHOST *host, RRDCALC *rc) {
};
int ret = 1;
RRDCALC *t = dictionary_set_advanced(host->rrdcalc_root_index, key, (ssize_t)(key_len + 1), rc, sizeof(RRDCALC), &tmp);
RRDCALC *t = dictionary_set_advanced(host->rrdcalc_root_index, key, (ssize_t)key_len, rc, sizeof(RRDCALC), &tmp);
if(tmp.react_action == RRDCALC_REACT_NEW) {
// we copied rc into the dictionary, so we have to free the container here
freez(rc);
@ -795,7 +795,7 @@ static void rrdcalc_unlink_and_delete(RRDHOST *host, RRDCALC *rc, bool having_ll
if(rc->rrdset)
rrdcalc_unlink_from_rrdset(rc, having_ll_wrlock);
dictionary_del_advanced(host->rrdcalc_root_index, string2str(rc->key), (ssize_t)string_strlen(rc->key) + 1);
dictionary_del_advanced(host->rrdcalc_root_index, string2str(rc->key), (ssize_t)string_strlen(rc->key));
}

@ -231,7 +231,7 @@ void rrdcalctemplate_add_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) {
size_t key_len = snprintfz(key, RRDCALCTEMPLATE_MAX_KEY_SIZE, "%s", rrdcalctemplate_name(rt));
bool added = false;
dictionary_set_advanced(host->rrdcalctemplate_root_index, key, (ssize_t)(key_len + 1), rt, sizeof(*rt), &added);
dictionary_set_advanced(host->rrdcalctemplate_root_index, key, (ssize_t)key_len, rt, sizeof(*rt), &added);
if(added)
freez(rt);

@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDCOLLECTOR_INTERNALS_H
#define NETDATA_RRDCOLLECTOR_INTERNALS_H
#include "rrd.h"
struct rrd_collector;
struct rrd_collector *rrd_collector_acquire_current_thread(void);
void rrd_collector_release(struct rrd_collector *rdc);
extern __thread struct rrd_collector *thread_rrd_collector;
bool rrd_collector_running(struct rrd_collector *rdc);
pid_t rrd_collector_tid(struct rrd_collector *rdc);
bool rrd_collector_dispatcher_acquire(struct rrd_collector *rdc);
void rrd_collector_dispatcher_release(struct rrd_collector *rdc);
#endif //NETDATA_RRDCOLLECTOR_INTERNALS_H

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#define NETDATA_RRDCOLLECTOR_INTERNALS
#include "rrdcollector.h"
#include "rrdcollector-internals.h"
// Each function points to this collector structure
// so that when the collector exits, all of them will

@ -5,22 +5,6 @@
#include "rrd.h"
#ifdef NETDATA_RRDCOLLECTOR_INTERNALS
// ----------------------------------------------------------------------------
// private API
struct rrd_collector;
struct rrd_collector *rrd_collector_acquire_current_thread(void);
void rrd_collector_release(struct rrd_collector *rdc);
extern __thread struct rrd_collector *thread_rrd_collector;
bool rrd_collector_running(struct rrd_collector *rdc);
pid_t rrd_collector_tid(struct rrd_collector *rdc);
bool rrd_collector_dispatcher_acquire(struct rrd_collector *rdc);
void rrd_collector_dispatcher_release(struct rrd_collector *rdc);
#endif // NETDATA_RRDCOLLECTOR_INTERNALS
// ----------------------------------------------------------------------------
// public API

@ -243,7 +243,7 @@ void rrddimvar_add_and_leave_released(RRDDIM *rd, RRDVAR_TYPE type, const char *
.value = value,
.rrddim = rd
};
dictionary_set_advanced(rd->rrdset->rrddimvar_root_index, key, (ssize_t)(key_len + 1), NULL, sizeof(RRDDIMVAR), &tmp);
dictionary_set_advanced(rd->rrdset->rrddimvar_root_index, key, (ssize_t)key_len, NULL, sizeof(RRDDIMVAR), &tmp);
}
void rrddimvar_rename_all(RRDDIM *rd) {

@ -0,0 +1,164 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#define NETDATA_RRD_INTERNALS
#include "rrdfunctions-internals.h"
#include "rrdfunctions-exporters.h"
void rrd_chart_functions_expose_rrdpush(RRDSET *st, BUFFER *wb) {
if(!st->functions_view)
return;
struct rrd_host_function *t;
dfe_start_read(st->functions_view, t) {
if(t->options & RRD_FUNCTION_DYNCFG) continue;
buffer_sprintf(wb
, PLUGINSD_KEYWORD_FUNCTION " \"%s\" %d \"%s\" \"%s\" \"%s\" %d\n"
, t_dfe.name
, t->timeout
, string2str(t->help)
, string2str(t->tags)
, http_id2access(t->access)
, t->priority
);
}
dfe_done(t);
}
void rrd_global_functions_expose_rrdpush(RRDHOST *host, BUFFER *wb, bool dyncfg) {
rrdhost_flag_clear(host, RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED);
size_t configs = 0;
struct rrd_host_function *tmp;
dfe_start_read(host->functions, tmp) {
if(tmp->options & RRD_FUNCTION_LOCAL) continue;
if(tmp->options & RRD_FUNCTION_DYNCFG) {
// dyncfg functions are not sent individually to this parent;
// a single 'config' function is added below when needed
configs++;
continue;
}
buffer_sprintf(wb
, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\" \"%s\" \"%s\" %d\n"
, tmp_dfe.name
, tmp->timeout
, string2str(tmp->help)
, string2str(tmp->tags)
, http_id2access(tmp->access)
, tmp->priority
);
}
dfe_done(tmp);
if(dyncfg && configs)
dyncfg_add_streaming(wb);
}
static void functions2json(DICTIONARY *functions, BUFFER *wb) {
struct rrd_host_function *t;
dfe_start_read(functions, t) {
if (!rrd_collector_running(t->collector)) continue;
if(t->options & RRD_FUNCTION_DYNCFG) continue;
buffer_json_member_add_object(wb, t_dfe.name);
{
buffer_json_member_add_string_or_empty(wb, "help", string2str(t->help));
buffer_json_member_add_int64(wb, "timeout", (int64_t) t->timeout);
char options[65];
snprintfz(
options, 64
, "%s%s"
, (t->options & RRD_FUNCTION_LOCAL) ? "LOCAL " : ""
, (t->options & RRD_FUNCTION_GLOBAL) ? "GLOBAL" : ""
);
buffer_json_member_add_string_or_empty(wb, "options", options);
buffer_json_member_add_string_or_empty(wb, "tags", string2str(t->tags));
buffer_json_member_add_string(wb, "access", http_id2access(t->access));
buffer_json_member_add_uint64(wb, "priority", t->priority);
}
buffer_json_object_close(wb);
}
dfe_done(t);
}
void chart_functions2json(RRDSET *st, BUFFER *wb) {
if(!st || !st->functions_view) return;
functions2json(st->functions_view, wb);
}
void host_functions2json(RRDHOST *host, BUFFER *wb) {
if(!host || !host->functions) return;
buffer_json_member_add_object(wb, "functions");
struct rrd_host_function *t;
dfe_start_read(host->functions, t) {
if(!rrd_collector_running(t->collector)) continue;
if(t->options & RRD_FUNCTION_DYNCFG) continue;
buffer_json_member_add_object(wb, t_dfe.name);
{
buffer_json_member_add_string(wb, "help", string2str(t->help));
buffer_json_member_add_int64(wb, "timeout", t->timeout);
buffer_json_member_add_array(wb, "options");
{
if (t->options & RRD_FUNCTION_GLOBAL)
buffer_json_add_array_item_string(wb, "GLOBAL");
if (t->options & RRD_FUNCTION_LOCAL)
buffer_json_add_array_item_string(wb, "LOCAL");
}
buffer_json_array_close(wb);
buffer_json_member_add_string(wb, "tags", string2str(t->tags));
buffer_json_member_add_string(wb, "access", http_id2access(t->access));
buffer_json_member_add_uint64(wb, "priority", t->priority);
}
buffer_json_object_close(wb);
}
dfe_done(t);
buffer_json_object_close(wb);
}
void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void *value, size_t value_size) {
if(!rrdset_functions_view || !dst) return;
struct rrd_host_function *t;
dfe_start_read(rrdset_functions_view, t) {
if(!rrd_collector_running(t->collector)) continue;
if(t->options & RRD_FUNCTION_DYNCFG) continue;
dictionary_set(dst, t_dfe.name, value, value_size);
}
dfe_done(t);
}
void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, STRING **help, STRING **tags, HTTP_ACCESS *access, int *priority) {
if(!host || !host->functions || !dictionary_entries(host->functions) || !dst) return;
struct rrd_host_function *t;
dfe_start_read(host->functions, t) {
if(!rrd_collector_running(t->collector)) continue;
if(t->options & RRD_FUNCTION_DYNCFG) continue;
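// note: with multiple matching functions, help/tags/access/priority
// end up reflecting the last one iterated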
if(help)
*help = t->help;
if(tags)
*tags = t->tags;
if(access)
*access = t->access;
if(priority)
*priority = t->priority;
dictionary_set(dst, t_dfe.name, value, value_size);
}
dfe_done(t);
}

@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDFUNCTIONS_EXPORTERS_H
#define NETDATA_RRDFUNCTIONS_EXPORTERS_H
#include "rrd.h"
void rrd_chart_functions_expose_rrdpush(RRDSET *st, BUFFER *wb);
void rrd_global_functions_expose_rrdpush(RRDHOST *host, BUFFER *wb, bool dyncfg);
void chart_functions2json(RRDSET *st, BUFFER *wb);
void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void *value, size_t value_size);
void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, STRING **help, STRING **tags, HTTP_ACCESS *access, int *priority);
void host_functions2json(RRDHOST *host, BUFFER *wb);
#endif //NETDATA_RRDFUNCTIONS_EXPORTERS_H

@ -0,0 +1,641 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#define NETDATA_RRD_INTERNALS
#include "rrdcollector-internals.h"
#include "rrdfunctions-internals.h"
#include "rrdfunctions-inflight.h"
struct rrd_function_inflight {
bool used;
RRDHOST *host;
uuid_t transaction_uuid;
const char *transaction;
const char *cmd;
const char *sanitized_cmd;
const char *source;
size_t sanitized_cmd_length;
int timeout;
bool cancelled;
usec_t stop_monotonic_ut;
BUFFER *payload;
const DICTIONARY_ITEM *host_function_acquired;
// the collector
// we acquire this structure at the beginning,
// and we release it at the end
struct rrd_host_function *rdcf;
struct {
BUFFER *wb;
// in async mode,
// the function to call to send the result back
rrd_function_result_callback_t cb;
void *data;
} result;
struct {
// to be called in sync mode
// while the function is running
// to check if the function has been canceled
rrd_function_is_cancelled_cb_t cb;
void *data;
} is_cancelled;
struct {
// to be registered by the function itself
// used to signal the function to cancel
rrd_function_cancel_cb_t cb;
void *data;
} canceller;
struct {
// callback to receive progress reports from function
rrd_function_progress_cb_t cb;
void *data;
} progress;
struct {
// to be registered by the function itself
// used to send progress requests to function
rrd_function_progresser_cb_t cb;
void *data;
} progresser;
};
static DICTIONARY *rrd_functions_inflight_requests = NULL;
static void rrd_function_cancel_inflight(struct rrd_function_inflight *r);
// ----------------------------------------------------------------------------
static void rrd_functions_inflight_cleanup(struct rrd_function_inflight *r) {
buffer_free(r->payload);
freez((void *)r->transaction);
freez((void *)r->cmd);
freez((void *)r->sanitized_cmd);
freez((void *)r->source);
r->payload = NULL;
r->transaction = NULL;
r->cmd = NULL;
r->sanitized_cmd = NULL;
}
static void rrd_functions_inflight_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
struct rrd_function_inflight *r = value;
// internal_error(true, "FUNCTIONS: transaction '%s' finished", r->transaction);
rrd_functions_inflight_cleanup(r);
dictionary_acquired_item_release(r->host->functions, r->host_function_acquired);
}
void rrd_functions_inflight_init(void) {
if(rrd_functions_inflight_requests)
return;
rrd_functions_inflight_requests = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct rrd_function_inflight));
dictionary_register_delete_callback(rrd_functions_inflight_requests, rrd_functions_inflight_delete_cb, NULL);
}
void rrd_functions_inflight_destroy(void) {
if(!rrd_functions_inflight_requests)
return;
dictionary_destroy(rrd_functions_inflight_requests);
rrd_functions_inflight_requests = NULL;
}
static void rrd_inflight_async_function_register_canceller_cb(void *register_canceller_cb_data, rrd_function_cancel_cb_t canceller_cb, void *canceller_cb_data) {
struct rrd_function_inflight *r = register_canceller_cb_data;
r->canceller.cb = canceller_cb;
r->canceller.data = canceller_cb_data;
}
static void rrd_inflight_async_function_register_progresser_cb(void *register_progresser_cb_data, rrd_function_progresser_cb_t progresser_cb, void *progresser_cb_data) {
struct rrd_function_inflight *r = register_progresser_cb_data;
r->progresser.cb = progresser_cb;
r->progresser.data = progresser_cb_data;
}
// ----------------------------------------------------------------------------
// waiting for async function completion
struct rrd_function_call_wait {
RRDHOST *host;
const DICTIONARY_ITEM *host_function_acquired;
char *transaction;
bool free_with_signal;
bool data_are_ready;
netdata_mutex_t mutex;
pthread_cond_t cond;
int code;
};
static void rrd_inflight_function_cleanup(RRDHOST *host __maybe_unused, const char *transaction) {
dictionary_del(rrd_functions_inflight_requests, transaction);
dictionary_garbage_collect(rrd_functions_inflight_requests);
}
static void rrd_function_call_wait_free(struct rrd_function_call_wait *tmp) {
rrd_inflight_function_cleanup(tmp->host, tmp->transaction);
freez(tmp->transaction);
pthread_cond_destroy(&tmp->cond);
netdata_mutex_destroy(&tmp->mutex);
freez(tmp);
}
static void rrd_async_function_signal_when_ready(BUFFER *temp_wb __maybe_unused, int code, void *callback_data) {
struct rrd_function_call_wait *tmp = callback_data;
bool we_should_free = false;
netdata_mutex_lock(&tmp->mutex);
// since we got the mutex,
// the waiting thread is either in pthread_cond_timedwait()
// or gave up and left.
tmp->code = code;
tmp->data_are_ready = true;
if(tmp->free_with_signal)
we_should_free = true;
pthread_cond_signal(&tmp->cond);
netdata_mutex_unlock(&tmp->mutex);
if(we_should_free) {
buffer_free(temp_wb);
rrd_function_call_wait_free(tmp);
}
}
static void rrd_inflight_async_function_nowait_finished(BUFFER *wb, int code, void *data) {
struct rrd_function_inflight *r = data;
if(r->result.cb)
r->result.cb(wb, code, r->result.data);
rrd_inflight_function_cleanup(r->host, r->transaction);
}
static bool rrd_inflight_async_function_is_cancelled(void *data) {
struct rrd_function_inflight *r = data;
return __atomic_load_n(&r->cancelled, __ATOMIC_RELAXED);
}
static inline int rrd_call_function_async_and_dont_wait(struct rrd_function_inflight *r) {
struct rrd_function_execute rfe = {
.transaction = &r->transaction_uuid,
.function = r->sanitized_cmd,
.payload = r->payload,
.source = r->source,
.stop_monotonic_ut = &r->stop_monotonic_ut,
.result = {
.wb = r->result.wb,
.cb = rrd_inflight_async_function_nowait_finished,
.data = r,
},
.progress = {
.cb = r->progress.cb,
.data = r->progress.data,
},
.is_cancelled = {
.cb = rrd_inflight_async_function_is_cancelled,
.data = r,
},
.register_canceller = {
.cb = rrd_inflight_async_function_register_canceller_cb,
.data = r,
},
.register_progresser = {
.cb = rrd_inflight_async_function_register_progresser_cb,
.data = r,
},
};
int code = r->rdcf->execute_cb(&rfe, r->rdcf->execute_cb_data);
return code;
}
static int rrd_call_function_async_and_wait(struct rrd_function_inflight *r) {
struct rrd_function_call_wait *tmp = mallocz(sizeof(struct rrd_function_call_wait));
tmp->free_with_signal = false;
tmp->data_are_ready = false;
tmp->host = r->host;
tmp->host_function_acquired = r->host_function_acquired;
tmp->transaction = strdupz(r->transaction);
netdata_mutex_init(&tmp->mutex);
pthread_cond_init(&tmp->cond, NULL);
// we need a temporary BUFFER, because we may time out and the caller-supplied one may vanish,
// so we create a new one that is guaranteed to survive until the collector finishes...
bool we_should_free = false;
BUFFER *temp_wb = buffer_create(1024, &netdata_buffers_statistics.buffers_functions); // we need it because we may give up on it
temp_wb->content_type = r->result.wb->content_type;
struct rrd_function_execute rfe = {
.transaction = &r->transaction_uuid,
.function = r->sanitized_cmd,
.payload = r->payload,
.source = r->source,
.stop_monotonic_ut = &r->stop_monotonic_ut,
.result = {
.wb = temp_wb,
// we overwrite the result callbacks,
// so that we can clean up the allocations made
.cb = rrd_async_function_signal_when_ready,
.data = tmp,
},
.progress = {
.cb = r->progress.cb,
.data = r->progress.data,
},
.is_cancelled = {
.cb = rrd_inflight_async_function_is_cancelled,
.data = r,
},
.register_canceller = {
.cb = rrd_inflight_async_function_register_canceller_cb,
.data = r,
},
.register_progresser = {
.cb = rrd_inflight_async_function_register_progresser_cb,
.data = r,
},
};
int code = r->rdcf->execute_cb(&rfe, r->rdcf->execute_cb_data);
// this has to happen after we execute the callback,
// because if an async call is answered synchronously, taking the lock earlier would deadlock.
netdata_mutex_lock(&tmp->mutex);
if (code == HTTP_RESP_OK || tmp->data_are_ready) {
bool cancelled = false;
int rc = 0;
while (rc == 0 && !cancelled && !tmp->data_are_ready) {
usec_t now_mono_ut = now_monotonic_usec();
usec_t stop_mono_ut = __atomic_load_n(&r->stop_monotonic_ut, __ATOMIC_RELAXED) + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
if(now_mono_ut > stop_mono_ut) {
rc = ETIMEDOUT;
break;
}
// wait for 10ms, and loop again...
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
tp.tv_nsec += 10 * NSEC_PER_MSEC;
if(tp.tv_nsec >= (long)(1 * NSEC_PER_SEC)) {
tp.tv_sec++;
tp.tv_nsec -= 1 * NSEC_PER_SEC;
}
// the mutex is unlocked within pthread_cond_timedwait()
rc = pthread_cond_timedwait(&tmp->cond, &tmp->mutex, &tp);
// the mutex is again ours
if(rc == ETIMEDOUT) {
// 10ms have passed
rc = 0;
if (!tmp->data_are_ready && r->is_cancelled.cb &&
r->is_cancelled.cb(r->is_cancelled.data)) {
// internal_error(true, "FUNCTIONS: transaction '%s' is cancelled while waiting for response",
// r->transaction);
cancelled = true;
rrd_function_cancel_inflight(r);
break;
}
}
}
if (tmp->data_are_ready) {
// we have a response
buffer_contents_replace(r->result.wb, buffer_tostring(temp_wb), buffer_strlen(temp_wb));
r->result.wb->content_type = temp_wb->content_type;
r->result.wb->expires = temp_wb->expires;
if(r->result.wb->expires)
buffer_cacheable(r->result.wb);
else
buffer_no_cacheable(r->result.wb);
code = tmp->code;
tmp->free_with_signal = false;
we_should_free = true;
}
else if (rc == ETIMEDOUT || cancelled) {
// timeout or cancellation
// we will go away and let the callback free the structure
if(cancelled)
code = rrd_call_function_error(r->result.wb,
"Request cancelled",
HTTP_RESP_CLIENT_CLOSED_REQUEST);
else
code = rrd_call_function_error(r->result.wb,
"Timeout while waiting for a response from the collector.",
HTTP_RESP_GATEWAY_TIMEOUT);
tmp->free_with_signal = true;
we_should_free = false;
}
else {
code = rrd_call_function_error(
r->result.wb, "Internal error while communicating with the collector",
HTTP_RESP_INTERNAL_SERVER_ERROR);
tmp->free_with_signal = true;
we_should_free = false;
}
}
else {
// the response is not ok, and we don't have the data
tmp->free_with_signal = true;
we_should_free = false;
}
netdata_mutex_unlock(&tmp->mutex);
if (we_should_free) {
rrd_function_call_wait_free(tmp);
buffer_free(temp_wb);
}
return code;
}
static inline int rrd_call_function_async(struct rrd_function_inflight *r, bool wait) {
if(wait)
return rrd_call_function_async_and_wait(r);
else
return rrd_call_function_async_and_dont_wait(r);
}
// ----------------------------------------------------------------------------
int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, HTTP_ACCESS access, const char *cmd,
bool wait, const char *transaction,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
BUFFER *payload, const char *source) {
int code;
char sanitized_cmd[PLUGINSD_LINE_MAX + 1];
const DICTIONARY_ITEM *host_function_acquired = NULL;
char sanitized_source[(source ? strlen(source) : 0) + 1];
rrd_functions_sanitize(sanitized_source, source ? source : "", sizeof(sanitized_source));
// ------------------------------------------------------------------------
// find the function
size_t sanitized_cmd_length = rrd_functions_sanitize(sanitized_cmd, cmd, sizeof(sanitized_cmd));
code = rrd_functions_find_by_name(host, result_wb, sanitized_cmd, sanitized_cmd_length, &host_function_acquired);
if(code != HTTP_RESP_OK) {
rrd_call_function_error(result_wb, "not found", code);
if(result_cb)
result_cb(result_wb, code, result_cb_data);
return code;
}
struct rrd_host_function *rdcf = dictionary_acquired_item_value(host_function_acquired);
if(!web_client_has_enough_access_level(access, rdcf->access)) {
if(!aclk_connected)
rrd_call_function_error(result_wb, "This Netdata must be connected to Netdata Cloud to access this function.", HTTP_RESP_PRECOND_FAIL);
else if(access >= HTTP_ACCESS_ANY)
rrd_call_function_error(result_wb, "You need to login to the Netdata Cloud space this agent is claimed to, to access this function.", HTTP_RESP_PRECOND_FAIL);
else /* if(access < HTTP_ACCESS_ANY && rdcf->access < access) */
rrd_call_function_error(result_wb, "To access this function you need to be an admin in this Netdata Cloud space.", HTTP_RESP_PRECOND_FAIL);
dictionary_acquired_item_release(host->functions, host_function_acquired);
if(result_cb)
result_cb(result_wb, HTTP_RESP_PRECOND_FAIL, result_cb_data);
return HTTP_RESP_PRECOND_FAIL;
}
if(timeout_s <= 0)
timeout_s = rdcf->timeout;
// ------------------------------------------------------------------------
// validate and parse the transaction, or generate a new transaction id
char uuid_str[UUID_COMPACT_STR_LEN];
uuid_t uuid;
if(!transaction || !*transaction || uuid_parse_flexi(transaction, uuid) != 0)
uuid_generate_random(uuid);
uuid_unparse_lower_compact(uuid, uuid_str);
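// compact form: the uuid as 32 lowercase hex digits, without dashes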
transaction = uuid_str;
// ------------------------------------------------------------------------
// put the function into the inflight requests index
// (both sync and async executions are tracked there)
struct rrd_function_inflight t = {
.used = false,
.host = host,
.cmd = strdupz(cmd),
.sanitized_cmd = strdupz(sanitized_cmd),
.sanitized_cmd_length = sanitized_cmd_length,
.transaction = strdupz(transaction),
.source = strdupz(sanitized_source),
.payload = buffer_dup(payload),
.timeout = timeout_s,
.cancelled = false,
.stop_monotonic_ut = now_monotonic_usec() + timeout_s * USEC_PER_SEC,
.host_function_acquired = host_function_acquired,
.rdcf = rdcf,
.result = {
.wb = result_wb,
.cb = result_cb,
.data = result_cb_data,
},
.is_cancelled = {
.cb = is_cancelled_cb,
.data = is_cancelled_cb_data,
},
.progress = {
.cb = progress_cb,
.data = progress_cb_data,
},
};
uuid_copy(t.transaction_uuid, uuid);
struct rrd_function_inflight *r = dictionary_set(rrd_functions_inflight_requests, transaction, &t, sizeof(t));
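// with DICT_OPTION_DONT_OVERWRITE_VALUE, a duplicate transaction id returns
// the existing entry (already marked used) instead of overwriting it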
if(r->used) {
nd_log(NDLS_DAEMON, NDLP_NOTICE,
"FUNCTIONS: duplicate transaction '%s', function: '%s'",
t.transaction, t.cmd);
code = rrd_call_function_error(result_wb, "duplicate transaction", HTTP_RESP_BAD_REQUEST);
rrd_functions_inflight_cleanup(&t);
dictionary_acquired_item_release(r->host->functions, t.host_function_acquired);
if(result_cb)
result_cb(result_wb, code, result_cb_data);
return code;
}
r->used = true;
// internal_error(true, "FUNCTIONS: transaction '%s' started", r->transaction);
if(r->rdcf->sync) {
// the caller has to wait
struct rrd_function_execute rfe = {
.transaction = &r->transaction_uuid,
.function = r->sanitized_cmd,
.payload = r->payload,
.source = r->source,
.stop_monotonic_ut = &r->stop_monotonic_ut,
.result = {
.wb = r->result.wb,
// pass the caller's result callbacks through unchanged
.cb = r->result.cb,
.data = r->result.data,
},
.progress = {
.cb = r->progress.cb,
.data = r->progress.data,
},
.is_cancelled = {
.cb = r->is_cancelled.cb,
.data = r->is_cancelled.data,
},
.register_canceller = {
.cb = NULL,
.data = NULL,
},
.register_progresser = {
.cb = NULL,
.data = NULL,
},
};
code = r->rdcf->execute_cb(&rfe, r->rdcf->execute_cb_data);
rrd_inflight_function_cleanup(host, r->transaction);
return code;
}
return rrd_call_function_async(r, wait);
}
bool rrd_function_has_this_original_result_callback(uuid_t *transaction, rrd_function_result_callback_t cb) {
bool ret = false;
char str[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*transaction, str);
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(rrd_functions_inflight_requests, str);
if(item) {
struct rrd_function_inflight *r = dictionary_acquired_item_value(item);
if(r->result.cb == cb)
ret = true;
dictionary_acquired_item_release(rrd_functions_inflight_requests, item);
}
return ret;
}
static void rrd_function_cancel_inflight(struct rrd_function_inflight *r) {
if(!r)
return;
bool cancelled = __atomic_load_n(&r->cancelled, __ATOMIC_RELAXED);
if(cancelled) {
nd_log(NDLS_DAEMON, NDLP_DEBUG,
"FUNCTIONS: received a CANCEL request for transaction '%s', but it is already cancelled.",
r->transaction);
return;
}
__atomic_store_n(&r->cancelled, true, __ATOMIC_RELAXED);
if(!rrd_collector_dispatcher_acquire(r->rdcf->collector)) {
nd_log(NDLS_DAEMON, NDLP_DEBUG,
"FUNCTIONS: received a CANCEL request for transaction '%s', but the collector is not running.",
r->transaction);
return;
}
if(r->canceller.cb)
r->canceller.cb(r->canceller.data);
rrd_collector_dispatcher_release(r->rdcf->collector);
}
void rrd_function_cancel(const char *transaction) {
// internal_error(true, "FUNCTIONS: request to cancel transaction '%s'", transaction);
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(rrd_functions_inflight_requests, transaction);
if(!item) {
nd_log(NDLS_DAEMON, NDLP_DEBUG,
"FUNCTIONS: received a CANCEL request for transaction '%s', but the transaction is not running.",
transaction);
return;
}
struct rrd_function_inflight *r = dictionary_acquired_item_value(item);
rrd_function_cancel_inflight(r);
dictionary_acquired_item_release(rrd_functions_inflight_requests, item);
}
void rrd_function_progress(const char *transaction) {
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(rrd_functions_inflight_requests, transaction);
if(!item) {
nd_log(NDLS_DAEMON, NDLP_DEBUG,
"FUNCTIONS: received a PROGRESS request for transaction '%s', but the transaction is not running.",
transaction);
return;
}
struct rrd_function_inflight *r = dictionary_acquired_item_value(item);
if(!rrd_collector_dispatcher_acquire(r->rdcf->collector)) {
nd_log(NDLS_DAEMON, NDLP_DEBUG,
"FUNCTIONS: received a PROGRESS request for transaction '%s', but the collector is not running.",
transaction);
goto cleanup;
}
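// a PROGRESS report extends the inflight deadline and is forwarded
// to the function's registered progresser, if any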
functions_stop_monotonic_update_on_progress(&r->stop_monotonic_ut);
if(r->progresser.cb)
r->progresser.cb(r->progresser.data);
rrd_collector_dispatcher_release(r->rdcf->collector);
cleanup:
dictionary_acquired_item_release(rrd_functions_inflight_requests, item);
}
void rrd_function_call_progresser(uuid_t *transaction) {
char str[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*transaction, str);
rrd_function_progress(str);
}

@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDFUNCTIONS_INFLIGHT_H
#define NETDATA_RRDFUNCTIONS_INFLIGHT_H
#include "rrd.h"
void rrd_functions_inflight_init(void);
// cancel a running function, to be run from anywhere
void rrd_function_cancel(const char *transaction);
void rrd_function_progress(const char *transaction);
void rrd_function_call_progresser(uuid_t *transaction);
#endif //NETDATA_RRDFUNCTIONS_INFLIGHT_H

@ -0,0 +1,42 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "rrdfunctions-inline.h"
struct rrd_function_inline {
rrd_function_execute_inline_cb_t cb;
};
static int rrd_function_run_inline(struct rrd_function_execute *rfe, void *data) {
// IMPORTANT: this function MUST call the result_cb even on failures
struct rrd_function_inline *fi = data;
int code;
if(rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data))
code = HTTP_RESP_CLIENT_CLOSED_REQUEST;
else
code = fi->cb(rfe->result.wb, rfe->function);
if(code == HTTP_RESP_CLIENT_CLOSED_REQUEST || (rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data))) {
buffer_flush(rfe->result.wb);
code = HTTP_RESP_CLIENT_CLOSED_REQUEST;
}
if(rfe->result.cb)
rfe->result.cb(rfe->result.wb, code, rfe->result.data);
return code;
}
void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, const char *help, const char *tags,
HTTP_ACCESS access, rrd_function_execute_inline_cb_t execute_cb) {
rrd_collector_started(); // this creates a collector that runs for as long as netdata runs
struct rrd_function_inline *fi = callocz(1, sizeof(struct rrd_function_inline));
fi->cb = execute_cb;
rrd_function_add(host, st, name, timeout, priority, help, tags, access, true, rrd_function_run_inline, fi);
}
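// usage sketch (hypothetical name and values): expose an inline function on a host, e.g.
//   rrd_function_add_inline(host, NULL, "netdata-progress", 10, RRDFUNCTIONS_PRIORITY_DEFAULT,
//                           "Show progress", "info", HTTP_ACCESS_ANY, rrdhost_function_progress);
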
@ -0,0 +1,14 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDFUNCTIONS_INLINE_H
#define NETDATA_RRDFUNCTIONS_INLINE_H
#include "rrd.h"
typedef int (*rrd_function_execute_inline_cb_t)(BUFFER *wb, const char *function);
void rrd_function_add_inline(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority,
const char *help, const char *tags,
HTTP_ACCESS access, rrd_function_execute_inline_cb_t execute_cb);
#endif //NETDATA_RRDFUNCTIONS_INLINE_H

@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDFUNCTIONS_INTERNALS_H
#define NETDATA_RRDFUNCTIONS_INTERNALS_H
#include "rrd.h"
#include "rrdcollector-internals.h"
typedef enum __attribute__((packed)) {
RRD_FUNCTION_LOCAL = (1 << 0),
RRD_FUNCTION_GLOBAL = (1 << 1),
RRD_FUNCTION_DYNCFG = (1 << 2),
// this is 8-bit
} RRD_FUNCTION_OPTIONS;
struct rrd_host_function {
bool sync; // when true, the function is called synchronously
RRD_FUNCTION_OPTIONS options; // RRD_FUNCTION_OPTIONS
HTTP_ACCESS access;
STRING *help;
STRING *tags;
int timeout; // the default timeout of the function
int priority;
rrd_function_execute_cb_t execute_cb;
void *execute_cb_data;
struct rrd_collector *collector;
};
size_t rrd_functions_sanitize(char *dst, const char *src, size_t dst_len);
int rrd_functions_find_by_name(RRDHOST *host, BUFFER *wb, const char *name, size_t key_length, const DICTIONARY_ITEM **item);
#endif //NETDATA_RRDFUNCTIONS_INTERNALS_H

@ -0,0 +1,8 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "rrdfunctions-progress.h"
int rrdhost_function_progress(BUFFER *wb, const char *function __maybe_unused) {
return progress_function_result(wb, rrdhost_hostname(localhost));
}

@ -0,0 +1,10 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDFUNCTIONS_PROGRESS_H
#define NETDATA_RRDFUNCTIONS_PROGRESS_H
#include "rrd.h"
int rrdhost_function_progress(BUFFER *wb, const char *function __maybe_unused);
#endif //NETDATA_RRDFUNCTIONS_PROGRESS_H

@ -0,0 +1,626 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "rrdfunctions-streaming.h"
int rrdhost_function_streaming(BUFFER *wb, const char *function __maybe_unused) {
time_t now = now_realtime_sec();
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
buffer_json_member_add_string(wb, "type", "table");
buffer_json_member_add_time_t(wb, "update_every", 1);
buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_STREAMING_HELP);
buffer_json_member_add_array(wb, "data");
size_t max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX] = { 0 };
size_t max_db_metrics = 0, max_db_instances = 0, max_db_contexts = 0;
size_t max_collection_replication_instances = 0, max_streaming_replication_instances = 0;
size_t max_ml_anomalous = 0, max_ml_normal = 0, max_ml_trained = 0, max_ml_pending = 0, max_ml_silenced = 0;
{
RRDHOST *host;
dfe_start_read(rrdhost_root_index, host) {
RRDHOST_STATUS s;
rrdhost_status(host, now, &s);
buffer_json_add_array_item_array(wb);
if(s.db.metrics > max_db_metrics)
max_db_metrics = s.db.metrics;
if(s.db.instances > max_db_instances)
max_db_instances = s.db.instances;
if(s.db.contexts > max_db_contexts)
max_db_contexts = s.db.contexts;
if(s.ingest.replication.instances > max_collection_replication_instances)
max_collection_replication_instances = s.ingest.replication.instances;
if(s.stream.replication.instances > max_streaming_replication_instances)
max_streaming_replication_instances = s.stream.replication.instances;
for(int i = 0; i < STREAM_TRAFFIC_TYPE_MAX ;i++) {
if (s.stream.sent_bytes_on_this_connection_per_type[i] >
max_sent_bytes_on_this_connection_per_type[i])
max_sent_bytes_on_this_connection_per_type[i] =
s.stream.sent_bytes_on_this_connection_per_type[i];
}
// retention
buffer_json_add_array_item_string(wb, rrdhost_hostname(s.host)); // Node
buffer_json_add_array_item_uint64(wb, s.db.first_time_s * MSEC_PER_SEC); // dbFrom
buffer_json_add_array_item_uint64(wb, s.db.last_time_s * MSEC_PER_SEC); // dbTo
if(s.db.first_time_s && s.db.last_time_s && s.db.last_time_s > s.db.first_time_s)
buffer_json_add_array_item_uint64(wb, s.db.last_time_s - s.db.first_time_s); // dbDuration
else
buffer_json_add_array_item_string(wb, NULL); // dbDuration
buffer_json_add_array_item_uint64(wb, s.db.metrics); // dbMetrics
buffer_json_add_array_item_uint64(wb, s.db.instances); // dbInstances
buffer_json_add_array_item_uint64(wb, s.db.contexts); // dbContexts
// statuses
buffer_json_add_array_item_string(wb, rrdhost_ingest_status_to_string(s.ingest.status)); // InStatus
buffer_json_add_array_item_string(wb, rrdhost_streaming_status_to_string(s.stream.status)); // OutStatus
buffer_json_add_array_item_string(wb, rrdhost_ml_status_to_string(s.ml.status)); // MLStatus
// collection
if(s.ingest.since) {
buffer_json_add_array_item_uint64(wb, s.ingest.since * MSEC_PER_SEC); // InSince
buffer_json_add_array_item_time_t(wb, s.now - s.ingest.since); // InAge
}
else {
buffer_json_add_array_item_string(wb, NULL); // InSince
buffer_json_add_array_item_string(wb, NULL); // InAge
}
buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(s.ingest.reason)); // InReason
buffer_json_add_array_item_uint64(wb, s.ingest.hops); // InHops
buffer_json_add_array_item_double(wb, s.ingest.replication.completion); // InReplCompletion
buffer_json_add_array_item_uint64(wb, s.ingest.replication.instances); // InReplInstances
buffer_json_add_array_item_string(wb, s.ingest.peers.local.ip); // InLocalIP
buffer_json_add_array_item_uint64(wb, s.ingest.peers.local.port); // InLocalPort
buffer_json_add_array_item_string(wb, s.ingest.peers.peer.ip); // InRemoteIP
buffer_json_add_array_item_uint64(wb, s.ingest.peers.peer.port); // InRemotePort
buffer_json_add_array_item_string(wb, s.ingest.ssl ? "SSL" : "PLAIN"); // InSSL
stream_capabilities_to_json_array(wb, s.ingest.capabilities, NULL); // InCapabilities
// streaming
if(s.stream.since) {
buffer_json_add_array_item_uint64(wb, s.stream.since * MSEC_PER_SEC); // OutSince
buffer_json_add_array_item_time_t(wb, s.now - s.stream.since); // OutAge
}
else {
buffer_json_add_array_item_string(wb, NULL); // OutSince
buffer_json_add_array_item_string(wb, NULL); // OutAge
}
buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(s.stream.reason)); // OutReason
buffer_json_add_array_item_uint64(wb, s.stream.hops); // OutHops
buffer_json_add_array_item_double(wb, s.stream.replication.completion); // OutReplCompletion
buffer_json_add_array_item_uint64(wb, s.stream.replication.instances); // OutReplInstances
buffer_json_add_array_item_string(wb, s.stream.peers.local.ip); // OutLocalIP
buffer_json_add_array_item_uint64(wb, s.stream.peers.local.port); // OutLocalPort
buffer_json_add_array_item_string(wb, s.stream.peers.peer.ip); // OutRemoteIP
buffer_json_add_array_item_uint64(wb, s.stream.peers.peer.port); // OutRemotePort
buffer_json_add_array_item_string(wb, s.stream.ssl ? "SSL" : "PLAIN"); // OutSSL
buffer_json_add_array_item_string(wb, s.stream.compression ? "COMPRESSED" : "UNCOMPRESSED"); // OutCompression
stream_capabilities_to_json_array(wb, s.stream.capabilities, NULL); // OutCapabilities
buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]);
buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]);
buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]);
buffer_json_add_array_item_uint64(wb, s.stream.sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]);
buffer_json_add_array_item_array(wb); // OutAttemptHandshake
time_t last_attempt = 0;
for(struct rrdpush_destinations *d = host->destinations; d ; d = d->next) {
if(d->since > last_attempt)
last_attempt = d->since;
buffer_json_add_array_item_string(wb, stream_handshake_error_to_string(d->reason));
}
buffer_json_array_close(wb); // OutAttemptHandshake
if(!last_attempt) {
buffer_json_add_array_item_string(wb, NULL); // OutAttemptSince
buffer_json_add_array_item_string(wb, NULL); // OutAttemptAge
}
else {
buffer_json_add_array_item_uint64(wb, last_attempt * MSEC_PER_SEC); // OutAttemptSince
buffer_json_add_array_item_time_t(wb, s.now - last_attempt); // OutAttemptAge
}
// ML
if(s.ml.status == RRDHOST_ML_STATUS_RUNNING) {
buffer_json_add_array_item_uint64(wb, s.ml.metrics.anomalous); // MlAnomalous
buffer_json_add_array_item_uint64(wb, s.ml.metrics.normal); // MlNormal
buffer_json_add_array_item_uint64(wb, s.ml.metrics.trained); // MlTrained
buffer_json_add_array_item_uint64(wb, s.ml.metrics.pending); // MlPending
buffer_json_add_array_item_uint64(wb, s.ml.metrics.silenced); // MlSilenced
if(s.ml.metrics.anomalous > max_ml_anomalous)
max_ml_anomalous = s.ml.metrics.anomalous;
if(s.ml.metrics.normal > max_ml_normal)
max_ml_normal = s.ml.metrics.normal;
if(s.ml.metrics.trained > max_ml_trained)
max_ml_trained = s.ml.metrics.trained;
if(s.ml.metrics.pending > max_ml_pending)
max_ml_pending = s.ml.metrics.pending;
if(s.ml.metrics.silenced > max_ml_silenced)
max_ml_silenced = s.ml.metrics.silenced;
}
else {
buffer_json_add_array_item_string(wb, NULL); // MlAnomalous
buffer_json_add_array_item_string(wb, NULL); // MlNormal
buffer_json_add_array_item_string(wb, NULL); // MlTrained
buffer_json_add_array_item_string(wb, NULL); // MlPending
buffer_json_add_array_item_string(wb, NULL); // MlSilenced
}
// close
buffer_json_array_close(wb);
}
dfe_done(host);
}
buffer_json_array_close(wb); // data
buffer_json_member_add_object(wb, "columns");
{
size_t field_id = 0;
// Node
buffer_rrdf_table_add_field(wb, field_id++, "Node", "Node's Hostname",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY,
NULL);
buffer_rrdf_table_add_field(wb, field_id++, "dbFrom", "DB Data Retention From",
RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "dbTo", "DB Data Retention To",
RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "dbDuration", "DB Data Retention Duration",
RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "dbMetrics", "Time-series Metrics in the DB",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, (double)max_db_metrics, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "dbInstances", "Instances in the DB",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, (double)max_db_instances, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "dbContexts", "Contexts in the DB",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, (double)max_db_contexts, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
// --- statuses ---
buffer_rrdf_table_add_field(wb, field_id++, "InStatus", "Data Collection Online Status",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutStatus", "Streaming Online Status",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "MlStatus", "ML Status",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE, NULL);
// --- collection ---
buffer_rrdf_table_add_field(wb, field_id++, "InSince", "Last Data Collection Status Change",
RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS,
0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InAge", "Last Data Collection Online Status Change Age",
RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InReason", "Data Collection Online Status Reason",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InHops", "Data Collection Distance Hops from Origin Node",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InReplCompletion", "Inbound Replication Completion",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
1, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InReplInstances", "Inbound Replicating Instances",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "instances", (double)max_collection_replication_instances, RRDF_FIELD_SORT_DESCENDING,
NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InLocalIP", "Inbound Local IP",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InLocalPort", "Inbound Local Port",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InRemoteIP", "Inbound Remote IP",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InRemotePort", "Inbound Remote Port",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InSSL", "Inbound SSL Connection",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "InCapabilities", "Inbound Connection Capabilities",
RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
// --- streaming ---
buffer_rrdf_table_add_field(wb, field_id++, "OutSince", "Last Streaming Status Change",
RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS,
0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutAge", "Last Streaming Status Change Age",
RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutReason", "Streaming Status Reason",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutHops", "Streaming Distance Hops from Origin Node",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutReplCompletion", "Outbound Replication Completion",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
1, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutReplInstances", "Outbound Replicating Instances",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "instances", (double)max_streaming_replication_instances, RRDF_FIELD_SORT_DESCENDING,
NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutLocalIP", "Outbound Local IP",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutLocalPort", "Outbound Local Port",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutRemoteIP", "Outbound Remote IP",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutRemotePort", "Outbound Remote Port",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutSSL", "Outbound SSL Connection",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutCompression", "Outbound Compressed Connection",
RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutCapabilities", "Outbound Connection Capabilities",
RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficData", "Outbound Metric Data Traffic",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "bytes", (double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA],
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficMetadata", "Outbound Metric Metadata Traffic",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "bytes",
(double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA],
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficReplication", "Outbound Metric Replication Traffic",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "bytes",
(double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION],
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutTrafficFunctions", "Outbound Metric Functions Traffic",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "bytes",
(double)max_sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS],
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptHandshake",
"Outbound Connection Attempt Handshake Status",
RRDF_FIELD_TYPE_ARRAY, RRDF_FIELD_VISUAL_PILL, RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptSince",
"Last Outbound Connection Attempt Status Change Time",
RRDF_FIELD_TYPE_TIMESTAMP, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DATETIME_MS,
0, NULL, NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "OutAttemptAge",
"Last Outbound Connection Attempt Status Change Age",
RRDF_FIELD_TYPE_DURATION, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_DURATION_S,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
RRDF_FIELD_SUMMARY_MIN, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
// --- ML ---
buffer_rrdf_table_add_field(wb, field_id++, "MlAnomalous", "Number of Anomalous Metrics",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "metrics",
(double)max_ml_anomalous,
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "MlNormal", "Number of Not Anomalous Metrics",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "metrics",
(double)max_ml_normal,
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "MlTrained", "Number of Trained Metrics",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "metrics",
(double)max_ml_trained,
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "MlPending", "Number of Pending Metrics",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "metrics",
(double)max_ml_pending,
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "MlSilenced", "Number of Silenced Metrics",
RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
0, "metrics",
(double)max_ml_silenced,
RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
}
buffer_json_object_close(wb); // columns
buffer_json_member_add_string(wb, "default_sort_column", "Node");
buffer_json_member_add_object(wb, "charts");
{
// Data Collection Age chart
buffer_json_member_add_object(wb, "InAge");
{
buffer_json_member_add_string(wb, "name", "Data Collection Age");
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "InAge");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
// Streaming Age chart
buffer_json_member_add_object(wb, "OutAge");
{
buffer_json_member_add_string(wb, "name", "Streaming Age");
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "OutAge");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
// DB Duration
buffer_json_member_add_object(wb, "dbDuration");
{
buffer_json_member_add_string(wb, "name", "Retention Duration");
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "dbDuration");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
}
buffer_json_object_close(wb); // charts
buffer_json_member_add_array(wb, "default_charts");
{
buffer_json_add_array_item_array(wb);
buffer_json_add_array_item_string(wb, "InAge");
buffer_json_add_array_item_string(wb, "Node");
buffer_json_array_close(wb);
buffer_json_add_array_item_array(wb);
buffer_json_add_array_item_string(wb, "OutAge");
buffer_json_add_array_item_string(wb, "Node");
buffer_json_array_close(wb);
}
buffer_json_array_close(wb);
buffer_json_member_add_object(wb, "group_by");
{
buffer_json_member_add_object(wb, "Node");
{
buffer_json_member_add_string(wb, "name", "Node");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "Node");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, "InStatus");
{
buffer_json_member_add_string(wb, "name", "Nodes by Collection Status");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "InStatus");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, "OutStatus");
{
buffer_json_member_add_string(wb, "name", "Nodes by Streaming Status");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "OutStatus");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, "MlStatus");
{
buffer_json_member_add_string(wb, "name", "Nodes by ML Status");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "MlStatus");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, "InRemoteIP");
{
buffer_json_member_add_string(wb, "name", "Nodes by Inbound IP");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "InRemoteIP");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, "OutRemoteIP");
{
buffer_json_member_add_string(wb, "name", "Nodes by Outbound IP");
buffer_json_member_add_array(wb, "columns");
{
buffer_json_add_array_item_string(wb, "OutRemoteIP");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
}
buffer_json_object_close(wb); // group_by
buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
buffer_json_finalize(wb);
return HTTP_RESP_OK;
}

View file

@@ -0,0 +1,12 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDFUNCTIONS_STREAMING_H
#define NETDATA_RRDFUNCTIONS_STREAMING_H
#include "rrd.h"
#define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children."
int rrdhost_function_streaming(BUFFER *wb, const char *function);
#endif //NETDATA_RRDFUNCTIONS_STREAMING_H

File diff suppressed because it is too large

View file

@@ -4,7 +4,7 @@
// ----------------------------------------------------------------------------
#include "rrd.h"
#include "../libnetdata/libnetdata.h"
#define RRDFUNCTIONS_PRIORITY_DEFAULT 100
@@ -18,65 +18,76 @@ typedef void (*rrd_function_progress_cb_t)(void *data, size_t done, size_t all);
typedef void (*rrd_function_progresser_cb_t)(void *data);
typedef void (*rrd_function_register_progresser_cb_t)(void *register_progresser_cb_data, rrd_function_progresser_cb_t progresser_cb, void *progresser_cb_data);
typedef int (*rrd_function_execute_cb_t)(uuid_t *transaction, BUFFER *wb,
usec_t *stop_monotonic_ut, const char *function, void *collector_data,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data,
rrd_function_register_progresser_cb_t register_progresser_cb, void *register_progresser_cb_data);
struct rrd_function_execute {
uuid_t *transaction;
const char *function;
BUFFER *payload;
const char *source;
void rrd_functions_inflight_init(void);
void rrdfunctions_host_init(RRDHOST *host);
void rrdfunctions_host_destroy(RRDHOST *host);
usec_t *stop_monotonic_ut;
struct {
BUFFER *wb; // the response should be written here
rrd_function_result_callback_t cb;
void *data;
} result;
struct {
rrd_function_progress_cb_t cb;
void *data;
} progress;
struct {
rrd_function_is_cancelled_cb_t cb;
void *data;
} is_cancelled;
struct {
rrd_function_register_canceller_cb_t cb;
void *data;
} register_canceller;
struct {
rrd_function_register_progresser_cb_t cb;
void *data;
} register_progresser;
};
typedef int (*rrd_function_execute_cb_t)(struct rrd_function_execute *rfe, void *data);
// ----------------------------------------------------------------------------
#include "rrd.h"
void rrd_functions_host_init(RRDHOST *host);
void rrd_functions_host_destroy(RRDHOST *host);
// add a function, to be run from the collector
void rrd_function_add(RRDHOST *host, RRDSET *st, const char *name, int timeout, int priority, const char *help, const char *tags,
HTTP_ACCESS access, bool sync, rrd_function_execute_cb_t execute_cb,
void *execute_cb_data);
void rrd_function_del(RRDHOST *host, RRDSET *st, const char *name);
// call a function, to be run from anywhere
int rrd_function_run(RRDHOST *host, BUFFER *result_wb, int timeout_s, HTTP_ACCESS access, const char *cmd,
bool wait, const char *transaction,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data, const char *payload);
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
BUFFER *payload, const char *source);
// cancel a running function, to be run from anywhere
void rrd_function_cancel(const char *transaction);
void rrd_function_progress(const char *transaction);
void rrd_function_call_progresser(uuid_t *transaction);
void rrd_functions_expose_rrdpush(RRDSET *st, BUFFER *wb);
void rrd_functions_expose_global_rrdpush(RRDHOST *host, BUFFER *wb);
void chart_functions2json(RRDSET *st, BUFFER *wb);
void chart_functions_to_dict(DICTIONARY *rrdset_functions_view, DICTIONARY *dst, void *value, size_t value_size);
void host_functions_to_dict(RRDHOST *host, DICTIONARY *dst, void *value, size_t value_size, STRING **help, STRING **tags, HTTP_ACCESS *access, int *priority);
void host_functions2json(RRDHOST *host, BUFFER *wb);
uint8_t functions_format_to_content_type(const char *format);
const char *functions_content_type_to_format(HTTP_CONTENT_TYPE content_type);
int rrd_call_function_error(BUFFER *wb, const char *msg, int code);
int rrdhost_function_progress(uuid_t *transaction, BUFFER *wb,
usec_t *stop_monotonic_ut, const char *function, void *collector_data,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data,
rrd_function_register_progresser_cb_t register_progresser_cb,
void *register_progresser_cb_data);
bool rrd_function_available(RRDHOST *host, const char *function);
int rrdhost_function_streaming(uuid_t *transaction, BUFFER *wb,
usec_t *stop_monotonic_ut, const char *function, void *collector_data,
rrd_function_result_callback_t result_cb, void *result_cb_data,
rrd_function_progress_cb_t progress_cb, void *progress_cb_data,
rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data,
rrd_function_register_progresser_cb_t register_progresser_cb,
void *register_progresser_cb_data);
bool rrd_function_has_this_original_result_callback(uuid_t *transaction, rrd_function_result_callback_t cb);
#define RRDFUNCTIONS_STREAMING_HELP "Streaming status for parents and children."
#include "rrdfunctions-inline.h"
#include "rrdfunctions-inflight.h"
#include "rrdfunctions-exporters.h"
#include "rrdfunctions-streaming.h"
#include "rrdfunctions-progress.h"
#endif // NETDATA_RRDFUNCTIONS_H

View file

@@ -337,7 +337,7 @@ int is_legacy = 1;
netdata_mutex_init(&host->receiver_lock);
if (likely(!archived)) {
rrdfunctions_host_init(host);
rrd_functions_host_init(host);
host->last_connected = now_realtime_sec();
host->rrdlabels = rrdlabels_create();
rrdhost_initialize_rrdpush_sender(
@@ -573,9 +573,6 @@ int is_legacy = 1;
, string2str(host->health.health_default_recipient)
);
host->configurable_plugins = dyncfg_dictionary_create();
dictionary_register_delete_callback(host->configurable_plugins, plugin_del_cb, NULL);
if(!archived) {
metaqueue_host_update_info(host);
rrdhost_load_rrdcontext_data(host);
@@ -694,7 +691,7 @@ static void rrdhost_update(RRDHOST *host
if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)) {
rrdhost_flag_clear(host, RRDHOST_FLAG_ARCHIVED);
rrdfunctions_host_init(host);
rrd_functions_host_init(host);
if(!host->rrdlabels)
host->rrdlabels = rrdlabels_create();
@@ -1115,18 +1112,17 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt
if (unlikely(!localhost))
return 1;
dyncfg_host_init(localhost);
// we register this only on localhost
// for the other nodes, the origin server should register it
rrd_collector_started(); // this creates a collector that runs for as long as netdata runs
rrd_function_add(localhost, NULL, "streaming", 10, RRDFUNCTIONS_PRIORITY_DEFAULT + 1,
RRDFUNCTIONS_STREAMING_HELP, "top",
HTTP_ACCESS_MEMBERS, true,
rrdhost_function_streaming, NULL);
rrd_function_add_inline(localhost, NULL, "streaming", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT + 1, RRDFUNCTIONS_STREAMING_HELP, "top",
HTTP_ACCESS_MEMBER, rrdhost_function_streaming);
rrd_function_add(localhost, NULL, "netdata-api-calls", 10, RRDFUNCTIONS_PRIORITY_DEFAULT + 2,
RRDFUNCTIONS_PROGRESS_HELP, "top",
HTTP_ACCESS_MEMBERS, true,
rrdhost_function_progress, NULL);
rrd_function_add_inline(localhost, NULL, "netdata-api-calls", 10,
RRDFUNCTIONS_PRIORITY_DEFAULT + 2, RRDFUNCTIONS_PROGRESS_HELP, "top",
HTTP_ACCESS_MEMBER, rrdhost_function_progress);
if (likely(system_info)) {
migrate_localhost(&localhost->host_uuid);
@@ -1328,7 +1324,7 @@ void rrdhost_free___while_having_rrd_wrlock(RRDHOST *host, bool force) {
freez(host->node_id);
rrdfamily_index_destroy(host);
rrdfunctions_host_destroy(host);
rrd_functions_host_destroy(host);
rrdvariables_destroy(host->rrdvars);
if (host == localhost)
rrdvariables_destroy(health_rrdvars);
@@ -1849,6 +1845,10 @@ void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s) {
RRDHOST_FLAGS flags = __atomic_load_n(&host->flags, __ATOMIC_RELAXED);
// --- dyncfg ---
s->dyncfg.status = dyncfg_available_for_rrdhost(host) ? RRDHOST_DYNCFG_STATUS_AVAILABLE : RRDHOST_DYNCFG_STATUS_UNAVAILABLE;
// --- db ---
bool online = rrdhost_is_online(host);

View file

@@ -448,7 +448,7 @@ __attribute__((constructor)) void initialize_labels_keys_char_map(void) {
label_names_char_map[' '] = '_';
label_names_char_map['\\'] = '/';
// create the spaces map
// create the space map
for(i = 0; i < 256 ;i++)
label_spaces_char_map[i] = (isspace(i) || iscntrl(i) || !isprint(i))?1:0;
@@ -460,8 +460,8 @@ __attribute__((constructor)) void initialize_label_stats(void) {
dictionary_stats_category_rrdlabels.memory.values = 0;
}
size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length) {
if(unlikely(!dst_size)) return 0;
size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, const unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length) {
if(unlikely(!src || !dst_size)) return 0;
if(unlikely(!src || !*src)) {
strncpyz((char *)dst, empty, dst_size);
@@ -476,7 +476,7 @@ size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_si
// make room for the final string termination
unsigned char *end = &d[dst_size - 1];
// copy while converting, but keep only one white space
// copy while converting, but keep only one space
// we start with last_is_space = 1 to skip leading spaces
int last_is_space = 1;

View file

@@ -20,7 +20,7 @@ typedef enum __attribute__ ((__packed__)) rrdlabel_source {
#define RRDLABEL_FLAG_INTERNAL (RRDLABEL_FLAG_OLD | RRDLABEL_FLAG_NEW | RRDLABEL_FLAG_DONT_DELETE)
size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length);
size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_size, const unsigned char *char_map, bool utf, const char *empty, size_t *multibyte_length);
RRDLABELS *rrdlabels_create(void);
void rrdlabels_destroy(RRDLABELS *labels_dict);

View file

@@ -237,7 +237,7 @@ void rrdsetvar_rename_all(RRDSET *st) {
void rrdsetvar_release_and_delete_all(RRDSET *st) {
RRDSETVAR *rs;
dfe_start_write(st->rrdsetvar_root_index, rs) {
dictionary_del_advanced(st->rrdsetvar_root_index, string2str(rs->name), (ssize_t)string_strlen(rs->name) + 1);
dictionary_del_advanced(st->rrdsetvar_root_index, string2str(rs->name), (ssize_t)string_strlen(rs->name));
}
dfe_done(rs);
}

View file

@@ -107,7 +107,7 @@ void rrdvariables_destroy(DICTIONARY *dict) {
}
static inline const RRDVAR_ACQUIRED *rrdvar_get_and_acquire(DICTIONARY *dict, STRING *name) {
return (const RRDVAR_ACQUIRED *)dictionary_get_and_acquire_item_advanced(dict, string2str(name), (ssize_t)string_strlen(name) + 1);
return (const RRDVAR_ACQUIRED *)dictionary_get_and_acquire_item_advanced(dict, string2str(name), (ssize_t)string_strlen(name));
}
inline void rrdvar_release_and_del(DICTIONARY *dict, const RRDVAR_ACQUIRED *rva) {
@@ -115,7 +115,7 @@ inline void rrdvar_release_and_del(DICTIONARY *dict, const RRDVAR_ACQUIRED *rva)
RRDVAR *rv = dictionary_acquired_item_value((const DICTIONARY_ITEM *)rva);
dictionary_del_advanced(dict, string2str(rv->name), (ssize_t)string_strlen(rv->name) + 1);
dictionary_del_advanced(dict, string2str(rv->name), (ssize_t)string_strlen(rv->name));
dictionary_acquired_item_release(dict, (const DICTIONARY_ITEM *)rva);
}
@@ -130,7 +130,7 @@ inline const RRDVAR_ACQUIRED *rrdvar_add_and_acquire(const char *scope __maybe_u
.options = options,
.react_action = RRDVAR_REACT_NONE,
};
return (const RRDVAR_ACQUIRED *)dictionary_set_and_acquire_item_advanced(dict, string2str(name), (ssize_t)string_strlen(name) + 1, NULL, sizeof(RRDVAR), &tmp);
return (const RRDVAR_ACQUIRED *)dictionary_set_and_acquire_item_advanced(dict, string2str(name), (ssize_t)string_strlen(name), NULL, sizeof(RRDVAR), &tmp);
}
inline void rrdvar_add(const char *scope __maybe_unused, DICTIONARY *dict, STRING *name, RRDVAR_TYPE type, RRDVAR_FLAGS options, void *value) {
@@ -143,7 +143,7 @@ inline void rrdvar_add(const char *scope __maybe_unused, DICTIONARY *dict, STRIN
.options = options,
.react_action = RRDVAR_REACT_NONE,
};
dictionary_set_advanced(dict, string2str(name), (ssize_t)string_strlen(name) + 1, NULL, sizeof(RRDVAR), &tmp);
dictionary_set_advanced(dict, string2str(name), (ssize_t)string_strlen(name), NULL, sizeof(RRDVAR), &tmp);
}
void rrdvar_delete_all(DICTIONARY *dict) {

View file

@@ -35,37 +35,6 @@ typedef enum __attribute__ ((__packed__)) {
WB_CONTENT_NO_CACHEABLE = (1 << 1),
} BUFFER_OPTIONS;
typedef enum __attribute__ ((__packed__)) {
CT_NONE = 0,
CT_APPLICATION_JSON,
CT_TEXT_PLAIN,
CT_TEXT_HTML,
CT_APPLICATION_X_JAVASCRIPT,
CT_TEXT_CSS,
CT_TEXT_XML,
CT_APPLICATION_XML,
CT_TEXT_XSL,
CT_APPLICATION_OCTET_STREAM,
CT_APPLICATION_X_FONT_TRUETYPE,
CT_APPLICATION_X_FONT_OPENTYPE,
CT_APPLICATION_FONT_WOFF,
CT_APPLICATION_FONT_WOFF2,
CT_APPLICATION_VND_MS_FONTOBJ,
CT_IMAGE_SVG_XML,
CT_IMAGE_PNG,
CT_IMAGE_JPG,
CT_IMAGE_GIF,
CT_IMAGE_XICON,
CT_IMAGE_ICNS,
CT_IMAGE_BMP,
CT_PROMETHEUS,
CT_AUDIO_MPEG,
CT_AUDIO_OGG,
CT_VIDEO_MP4,
CT_APPLICATION_PDF,
CT_APPLICATION_ZIP,
} HTTP_CONTENT_TYPE;
typedef enum __attribute__ ((__packed__)) {
BUFFER_JSON_OPTIONS_DEFAULT = 0,
BUFFER_JSON_OPTIONS_MINIFY = (1 << 0),
@@ -164,6 +133,9 @@ void buffer_json_finalize(BUFFER *wb);
static const char *buffer_tostring(BUFFER *wb)
{
if(unlikely(!wb))
return NULL;
buffer_need_bytes(wb, 1);
wb->buffer[wb->len] = '\0';
@@ -1249,4 +1221,26 @@ buffer_rrdf_table_add_field(BUFFER *wb, size_t field_id, const char *key, const
buffer_json_object_close(wb);
}
static inline void buffer_copy(BUFFER *dst, BUFFER *src) {
if(!src || !dst)
return;
buffer_contents_replace(dst, buffer_tostring(src), buffer_strlen(src));
dst->content_type = src->content_type;
dst->options = src->options;
dst->date = src->date;
dst->expires = src->expires;
dst->json = src->json;
}
static inline BUFFER *buffer_dup(BUFFER *src) {
if(!src)
return NULL;
BUFFER *dst = buffer_create(buffer_strlen(src) + 1, src->statistics);
buffer_copy(dst, src);
return dst;
}
#endif /* NETDATA_WEB_BUFFER_H */
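A usage sketch for the new `buffer_copy()` / `buffer_dup()` helpers above (illustrative, not part of the diff): `buffer_dup()` produces an independent copy of both the contents and the response metadata, so mutating the duplicate leaves the source untouched.

```c
BUFFER *src = buffer_create(0, NULL);
buffer_strcat(src, "hello");

BUFFER *dst = buffer_dup(src);  // deep copy of contents + content_type, options, dates
buffer_strcat(dst, " world");   // src still holds just "hello"

buffer_free(dst);
buffer_free(src);
```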

libnetdata/config/dyncfg.c Normal file
View file

@@ -0,0 +1,297 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "../libnetdata.h"
// ----------------------------------------------------------------------------
static struct {
DYNCFG_TYPE type;
const char *name;
} dyncfg_types[] = {
{ .type = DYNCFG_TYPE_SINGLE, .name = "single" },
{ .type = DYNCFG_TYPE_TEMPLATE, .name = "template" },
{ .type = DYNCFG_TYPE_JOB, .name = "job" },
};
DYNCFG_TYPE dyncfg_type2id(const char *type) {
if(!type || !*type)
return DYNCFG_TYPE_SINGLE;
size_t entries = sizeof(dyncfg_types) / sizeof(dyncfg_types[0]);
for(size_t i = 0; i < entries ;i++) {
if(strcmp(dyncfg_types[i].name, type) == 0)
return dyncfg_types[i].type;
}
return DYNCFG_TYPE_SINGLE;
}
const char *dyncfg_id2type(DYNCFG_TYPE type) {
size_t entries = sizeof(dyncfg_types) / sizeof(dyncfg_types[0]);
for(size_t i = 0; i < entries ;i++) {
if(type == dyncfg_types[i].type)
return dyncfg_types[i].name;
}
return "single";
}
// ----------------------------------------------------------------------------
static struct {
DYNCFG_SOURCE_TYPE source_type;
const char *name;
} dyncfg_source_types[] = {
{ .source_type = DYNCFG_SOURCE_TYPE_INTERNAL, .name = "internal" },
{ .source_type = DYNCFG_SOURCE_TYPE_STOCK, .name = "stock" },
{ .source_type = DYNCFG_SOURCE_TYPE_USER, .name = "user" },
{ .source_type = DYNCFG_SOURCE_TYPE_DYNCFG, .name = "dyncfg" },
{ .source_type = DYNCFG_SOURCE_TYPE_DISCOVERED, .name = "discovered" },
};
DYNCFG_SOURCE_TYPE dyncfg_source_type2id(const char *source_type) {
if(!source_type || !*source_type)
return DYNCFG_SOURCE_TYPE_INTERNAL;
size_t entries = sizeof(dyncfg_source_types) / sizeof(dyncfg_source_types[0]);
for(size_t i = 0; i < entries ;i++) {
if(strcmp(dyncfg_source_types[i].name, source_type) == 0)
return dyncfg_source_types[i].source_type;
}
return DYNCFG_SOURCE_TYPE_INTERNAL;
}
const char *dyncfg_id2source_type(DYNCFG_SOURCE_TYPE source_type) {
size_t entries = sizeof(dyncfg_source_types) / sizeof(dyncfg_source_types[0]);
for(size_t i = 0; i < entries ;i++) {
if(source_type == dyncfg_source_types[i].source_type)
return dyncfg_source_types[i].name;
}
return "internal";
}
// ----------------------------------------------------------------------------
static struct {
DYNCFG_STATUS status;
const char *name;
} dyncfg_statuses[] = {
{ .status = DYNCFG_STATUS_NONE, .name = "none" },
{ .status = DYNCFG_STATUS_ACCEPTED, .name = "accepted" },
{ .status = DYNCFG_STATUS_RUNNING, .name = "running" },
{ .status = DYNCFG_STATUS_FAILED, .name = "failed" },
{ .status = DYNCFG_STATUS_DISABLED, .name = "disabled" },
{ .status = DYNCFG_STATUS_ORPHAN, .name = "orphan" },
{ .status = DYNCFG_STATUS_INCOMPLETE, .name = "incomplete" },
};
DYNCFG_STATUS dyncfg_status2id(const char *status) {
if(!status || !*status)
return DYNCFG_STATUS_NONE;
size_t entries = sizeof(dyncfg_statuses) / sizeof(dyncfg_statuses[0]);
for(size_t i = 0; i < entries ;i++) {
if(strcmp(dyncfg_statuses[i].name, status) == 0)
return dyncfg_statuses[i].status;
}
return DYNCFG_STATUS_NONE;
}
const char *dyncfg_id2status(DYNCFG_STATUS status) {
size_t entries = sizeof(dyncfg_statuses) / sizeof(dyncfg_statuses[0]);
for(size_t i = 0; i < entries ;i++) {
if(status == dyncfg_statuses[i].status)
return dyncfg_statuses[i].name;
}
return "none";
}
// ----------------------------------------------------------------------------
static struct {
DYNCFG_CMDS cmd;
const char *name;
} cmd_map[] = {
{ .cmd = DYNCFG_CMD_GET, .name = "get" },
{ .cmd = DYNCFG_CMD_SCHEMA, .name = "schema" },
{ .cmd = DYNCFG_CMD_UPDATE, .name = "update" },
{ .cmd = DYNCFG_CMD_ADD, .name = "add" },
{ .cmd = DYNCFG_CMD_TEST, .name = "test" },
{ .cmd = DYNCFG_CMD_REMOVE, .name = "remove" },
{ .cmd = DYNCFG_CMD_ENABLE, .name = "enable" },
{ .cmd = DYNCFG_CMD_DISABLE, .name = "disable" },
{ .cmd = DYNCFG_CMD_RESTART, .name = "restart" }
};
const char *dyncfg_id2cmd_one(DYNCFG_CMDS cmd) {
for (size_t i = 0; i < sizeof(cmd_map) / sizeof(cmd_map[0]); i++) {
if(cmd == cmd_map[i].cmd)
return cmd_map[i].name;
}
return NULL;
}
DYNCFG_CMDS dyncfg_cmds2id(const char *cmds) {
if(!cmds || !*cmds)
return DYNCFG_CMD_NONE;
DYNCFG_CMDS result = DYNCFG_CMD_NONE;
const char *p = cmds;
size_t len, i;
while (*p) {
// Skip any leading spaces
while (*p == ' ') p++;
// Find the end of the current word
const char *end = p;
while (*end && *end != ' ') end++;
len = end - p;
// Compare with known commands
for (i = 0; i < sizeof(cmd_map) / sizeof(cmd_map[0]); i++) {
if (strncmp(p, cmd_map[i].name, len) == 0 && cmd_map[i].name[len] == '\0') {
result |= cmd_map[i].cmd;
break;
}
}
// Move to the next word
p = end;
}
return result;
}
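// For orientation (an illustrative note, not part of the diff): dyncfg_cmds2id()
// turns a space-separated list of command names into an OR-ed bitmask, e.g.
//
//     DYNCFG_CMDS cmds = dyncfg_cmds2id("get enable");
//     // cmds == (DYNCFG_CMD_GET | DYNCFG_CMD_ENABLE)
//
// dyncfg_cmds2buffer() below renders such a bitmask back into the same text form.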
void dyncfg_cmds2fp(DYNCFG_CMDS cmds, FILE *fp) {
for (size_t i = 0; i < sizeof(cmd_map) / sizeof(cmd_map[0]); i++) {
if(cmds & cmd_map[i].cmd)
fprintf(fp, "%s ", cmd_map[i].name);
}
}
void dyncfg_cmds2json_array(DYNCFG_CMDS cmds, const char *key, BUFFER *wb) {
buffer_json_member_add_array(wb, key);
for (size_t i = 0; i < sizeof(cmd_map) / sizeof(cmd_map[0]); i++) {
if(cmds & cmd_map[i].cmd)
buffer_json_add_array_item_string(wb, cmd_map[i].name);
}
buffer_json_array_close(wb);
}
void dyncfg_cmds2buffer(DYNCFG_CMDS cmds, BUFFER *wb) {
size_t added = 0;
for (size_t i = 0; i < sizeof(cmd_map) / sizeof(cmd_map[0]); i++) {
if(cmds & cmd_map[i].cmd) {
if(added)
buffer_fast_strcat(wb, " ", 1);
buffer_strcat(wb, cmd_map[i].name);
added++;
}
}
}
// ----------------------------------------------------------------------------
bool dyncfg_is_valid_id(const char *id) {
const char *s = id;
while(*s) {
if(isspace(*s) || *s == '\'') return false;
s++;
}
return true;
}
char *dyncfg_escape_id_for_filename(const char *id) {
if (id == NULL) return NULL;
// Allocate memory for the worst case, where every character is escaped.
char *escaped = mallocz(strlen(id) * 3 + 1); // Each char can become '%XX', plus '\0'
if (!escaped) return NULL;
const char *src = id;
char *dest = escaped;
while (*src) {
if (*src == '/' || isspace(*src) || !isprint(*src)) {
sprintf(dest, "%%%02X", (unsigned char)*src);
dest += 3;
} else {
*dest++ = *src;
}
src++;
}
*dest = '\0';
return escaped;
}
// ----------------------------------------------------------------------------
int dyncfg_default_response(BUFFER *wb, int code, const char *msg) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
wb->expires = now_realtime_sec();
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
buffer_json_member_add_uint64(wb, "status", code);
buffer_json_member_add_string(wb, "message", msg);
buffer_json_finalize(wb);
return code;
}
int dyncfg_node_find_and_call(DICTIONARY *dyncfg_nodes, const char *transaction, const char *function,
usec_t *stop_monotonic_ut, bool *cancelled,
BUFFER *payload, const char *source, BUFFER *result) {
if(!function || !*function)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "command received is empty");
char buf[strlen(function) + 1];
memcpy(buf, function, sizeof(buf));
char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line
size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS);
const char *id = get_word(words, num_words, 1);
const char *action = get_word(words, num_words, 2);
if(!id || !*id)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "dyncfg node: id is missing from the request");
if(!action || !*action)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "dyncfg node: action is missing from the request");
DYNCFG_CMDS cmd = dyncfg_cmds2id(action);
if(cmd == DYNCFG_CMD_NONE)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "dyncfg node: action given in request is unknown");
const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_nodes, id);
if(!item)
return dyncfg_default_response(result, HTTP_RESP_NOT_FOUND, "dyncfg node: id is not found");
struct dyncfg_node *df = dictionary_acquired_item_value(item);
buffer_flush(result);
result->content_type = CT_APPLICATION_JSON;
int code = df->cb(transaction, id, cmd, payload, stop_monotonic_ut, cancelled, result, source, df->data);
if(!result->expires)
result->expires = now_realtime_sec();
if(!buffer_tostring(result))
dyncfg_default_response(result, code, "");
dictionary_acquired_item_release(dyncfg_nodes, item);
return code;
}

View file

@@ -0,0 +1,85 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef LIBNETDATA_DYNCFG_H
#define LIBNETDATA_DYNCFG_H
#define DYNCFG_VERSION (size_t)1
#define DYNCFG_RESP_SUCCESS(code) (code >= 200 && code <= 299)
#define DYNCFG_RESP_RUNNING 200 // accepted and running
#define DYNCFG_RESP_ACCEPTED 202 // accepted, but not running yet
#define DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED 299 // accepted, but restart is required to apply it
typedef enum __attribute__((packed)) {
DYNCFG_TYPE_SINGLE = 0,
DYNCFG_TYPE_TEMPLATE,
DYNCFG_TYPE_JOB,
} DYNCFG_TYPE;
DYNCFG_TYPE dyncfg_type2id(const char *type);
const char *dyncfg_id2type(DYNCFG_TYPE type);
typedef enum __attribute__((packed)) {
DYNCFG_SOURCE_TYPE_INTERNAL = 0,
DYNCFG_SOURCE_TYPE_STOCK,
DYNCFG_SOURCE_TYPE_USER,
DYNCFG_SOURCE_TYPE_DYNCFG,
DYNCFG_SOURCE_TYPE_DISCOVERED,
} DYNCFG_SOURCE_TYPE;
DYNCFG_SOURCE_TYPE dyncfg_source_type2id(const char *source_type);
const char *dyncfg_id2source_type(DYNCFG_SOURCE_TYPE source_type);
typedef enum __attribute__((packed)) {
DYNCFG_STATUS_NONE = 0,
DYNCFG_STATUS_ACCEPTED, // the plugin has accepted the configuration
DYNCFG_STATUS_RUNNING, // the plugin runs the accepted configuration
DYNCFG_STATUS_FAILED, // the plugin fails to run the accepted configuration
DYNCFG_STATUS_DISABLED, // the configuration is disabled by a user
DYNCFG_STATUS_ORPHAN, // no plugin has claimed this configuration
DYNCFG_STATUS_INCOMPLETE, // a special kind of failed configuration
} DYNCFG_STATUS;
DYNCFG_STATUS dyncfg_status2id(const char *status);
const char *dyncfg_id2status(DYNCFG_STATUS status);
typedef enum __attribute__((packed)) {
DYNCFG_CMD_NONE = 0,
DYNCFG_CMD_GET = (1 << 0),
DYNCFG_CMD_SCHEMA = (1 << 1),
DYNCFG_CMD_UPDATE = (1 << 2),
DYNCFG_CMD_ADD = (1 << 3),
DYNCFG_CMD_TEST = (1 << 4),
DYNCFG_CMD_REMOVE = (1 << 5),
DYNCFG_CMD_ENABLE = (1 << 6),
DYNCFG_CMD_DISABLE = (1 << 7),
DYNCFG_CMD_RESTART = (1 << 8),
} DYNCFG_CMDS;
DYNCFG_CMDS dyncfg_cmds2id(const char *cmds);
void dyncfg_cmds2buffer(DYNCFG_CMDS cmds, struct web_buffer *wb);
void dyncfg_cmds2json_array(DYNCFG_CMDS cmds, const char *key, struct web_buffer *wb);
void dyncfg_cmds2fp(DYNCFG_CMDS cmds, FILE *fp);
const char *dyncfg_id2cmd_one(DYNCFG_CMDS cmd);
bool dyncfg_is_valid_id(const char *id);
char *dyncfg_escape_id_for_filename(const char *id);
#include "../clocks/clocks.h"
#include "../buffer/buffer.h"
#include "../dictionary/dictionary.h"
typedef int (*dyncfg_cb_t)(const char *transaction, const char *id, DYNCFG_CMDS cmd, BUFFER *payload, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *result, const char *source, void *data);
struct dyncfg_node {
DYNCFG_TYPE type;
DYNCFG_CMDS cmds;
dyncfg_cb_t cb;
void *data;
};
#define dyncfg_nodes_dictionary_create() dictionary_create_advanced(DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dyncfg_node))
int dyncfg_default_response(BUFFER *wb, int code, const char *msg);
int dyncfg_node_find_and_call(DICTIONARY *dyncfg_nodes, const char *transaction, const char *function,
usec_t *stop_monotonic_ut, bool *cancelled,
BUFFER *payload, const char *source, BUFFER *result);
#endif //LIBNETDATA_DYNCFG_H
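For orientation, a minimal sketch of how a caller might wire this API up (everything here — the id `example`, the callback, the include path — is illustrative, not part of the diff):

```c
// a hedged sketch, assuming the usual libnetdata umbrella header
#include "libnetdata/libnetdata.h"

// respond to "get" with a fixed JSON document; reject anything else
static int example_cb(const char *transaction, const char *id, DYNCFG_CMDS cmd,
                      BUFFER *payload, usec_t *stop_monotonic_ut, bool *cancelled,
                      BUFFER *result, const char *source, void *data) {
    (void)transaction; (void)id; (void)payload; (void)stop_monotonic_ut;
    (void)cancelled; (void)source; (void)data;

    if(cmd == DYNCFG_CMD_GET) {
        buffer_strcat(result, "{\"enabled\":true}");
        return DYNCFG_RESP_RUNNING;
    }

    return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "unsupported command");
}

void example_dyncfg_usage(void) {
    DICTIONARY *nodes = dyncfg_nodes_dictionary_create();

    struct dyncfg_node node = {
        .type = DYNCFG_TYPE_SINGLE,
        .cmds = DYNCFG_CMD_GET,
        .cb   = example_cb,
        .data = NULL,
    };
    dictionary_set(nodes, "example", &node, sizeof(node));

    BUFFER *result = buffer_create(0, NULL);
    usec_t stop_ut = now_monotonic_usec() + 10 * USEC_PER_SEC;
    bool cancelled = false;

    // parses "example get", finds the node by id, and dispatches to example_cb
    int code = dyncfg_node_find_and_call(nodes, "tr-1", "example get",
                                         &stop_ut, &cancelled,
                                         NULL /* no payload */, "unittest", result);

    fprintf(stderr, "response %d: %s\n", code, buffer_tostring(result));

    buffer_free(result);
    dictionary_destroy(nodes);
}
```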

View file

@@ -996,7 +996,9 @@ static int item_check_and_acquire_advanced(DICTIONARY *dict, DICTIONARY_ITEM *it
if (having_index_lock) {
// delete it from the hashtable
if(hashtable_delete_unsafe(dict, item_get_name(item), item->key_len, item) == 0)
netdata_log_error("DICTIONARY: INTERNAL ERROR VIEW: tried to delete item with name '%s', name_len %u that is not in the index", item_get_name(item), (KEY_LEN_TYPE)(item->key_len - 1));
netdata_log_error("DICTIONARY: INTERNAL ERROR VIEW: tried to delete item with name '%s', "
"name_len %u that is not in the index",
item_get_name(item), (KEY_LEN_TYPE)(item->key_len));
else
pointer_del(dict, item);
@@ -1237,7 +1239,7 @@ static inline size_t item_set_name(DICTIONARY *dict, DICTIONARY_ITEM *item, cons
}
else {
item->string_name = string_strdupz(name);
item->key_len = string_strlen(item->string_name) + 1;
item->key_len = string_strlen(item->string_name);
item->options |= ITEM_OPTION_ALLOCATED_NAME;
}
@@ -1584,7 +1586,7 @@ static inline void dict_item_release_and_check_if_it_is_deleted_and_can_be_remov
static bool dict_item_del(DICTIONARY *dict, const char *name, ssize_t name_len) {
if(name_len == -1)
name_len = (ssize_t)strlen(name) + 1; // we need the terminating null too
name_len = (ssize_t)strlen(name);
netdata_log_debug(D_DICTIONARY, "DEL dictionary entry with name '%s'.", name);
@@ -1602,9 +1604,9 @@ static bool dict_item_del(DICTIONARY *dict, const char *name, ssize_t name_len)
}
else {
if(hashtable_delete_unsafe(dict, name, name_len, item) == 0)
netdata_log_error("DICTIONARY: INTERNAL ERROR: tried to delete item with name '%s', name_len %zd that is not in the index",
name,
name_len - 1);
netdata_log_error("DICTIONARY: INTERNAL ERROR: tried to delete item with name '%s', "
"name_len %zd that is not in the index",
name, name_len);
else
pointer_del(dict, item);
@@ -1635,7 +1637,7 @@ static DICTIONARY_ITEM *dict_item_add_or_reset_value_and_acquire(DICTIONARY *dic
}
if(name_len == -1)
name_len = (ssize_t)strlen(name) + 1; // we need the terminating null too
name_len = (ssize_t)strlen(name);
netdata_log_debug(D_DICTIONARY, "SET dictionary entry with name '%s'.", name);
@@ -1754,7 +1756,7 @@ static DICTIONARY_ITEM *dict_item_find_and_acquire(DICTIONARY *dict, const char
}
if(name_len == -1)
name_len = (ssize_t)strlen(name) + 1; // we need the terminating null too
name_len = (ssize_t)strlen(name);
netdata_log_debug(D_DICTIONARY, "GET dictionary entry with name '%s'.", name);
@@ -1990,11 +1992,12 @@ static bool api_is_name_good_with_trace(DICTIONARY *dict __maybe_unused, const c
}
internal_error(
name_len > 0 && name_len != (ssize_t)(strlen(name) + 1),
"DICTIONARY: attempted to %s() with a name of '%s', having length of %zu (incl. '\\0'), but the supplied name_len = %ld, on a dictionary created from %s() %zu@%s.",
name_len > 0 && name_len != (ssize_t)strlen(name),
"DICTIONARY: attempted to %s() with a name of '%s', having length of %zu, "
"but the supplied name_len = %ld, on a dictionary created from %s() %zu@%s.",
function,
name,
strlen(name) + 1,
strlen(name),
(long int) name_len,
dict?dict->creation_function:"unknown",
dict?dict->creation_line:0,
@@ -2002,10 +2005,11 @@ static bool api_is_name_good_with_trace(DICTIONARY *dict __maybe_unused, const c
internal_error(
name_len <= 0 && name_len != -1,
"DICTIONARY: attempted to %s() with a name of '%s', having length of %zu (incl. '\\0'), but the supplied name_len = %ld, on a dictionary created from %s() %zu@%s.",
"DICTIONARY: attempted to %s() with a name of '%s', having length of %zu, "
"but the supplied name_len = %ld, on a dictionary created from %s() %zu@%s.",
function,
name,
strlen(name) + 1,
strlen(name),
(long int) name_len,
dict?dict->creation_function:"unknown",
dict?dict->creation_line:0,
@@ -2109,7 +2113,7 @@ void dictionary_flush(DICTIONARY *dict) {
DICTIONARY_ITEM *item, *next = NULL;
for(item = dict->items.list; item ;item = next) {
next = item->next;
dict_item_del(dict, item_get_name(item), (ssize_t) item_get_name_len(item) + 1);
dict_item_del(dict, item_get_name(item), (ssize_t)item_get_name_len(item));
}
ll_recursive_unlock(dict, DICTIONARY_LOCK_WRITE);
@@ -2580,7 +2584,7 @@ void *thread_cache_entry_get_or_set(void *key,
if(unlikely(!key || !key_length)) return NULL;
if(key_length == -1)
key_length = (ssize_t)strlen((char *)key) + 1;
key_length = (ssize_t)strlen((char *)key);
JError_t J_Error;
Pvoid_t *Rc = JudyHSIns(&thread_cache_judy_array, key, key_length, &J_Error);
@@ -2627,7 +2631,7 @@ static char **dictionary_unittest_generate_names(size_t entries) {
char **names = mallocz(sizeof(char *) * entries);
for(size_t i = 0; i < entries ;i++) {
char buf[25 + 1] = "";
snprintfz(buf, sizeof(buf) - 1, "name.%zu.0123456789.%zu!@#$%%^&*(),./[]{}\\|~`", i, entries / 2 + i);
snprintfz(buf, sizeof(buf), "name.%zu.0123456789.%zu!@#$%%^&*(),./[]{}\\|~`", i, entries / 2 + i);
names[i] = strdupz(buf);
}
return names;
@@ -2637,7 +2641,7 @@ static char **dictionary_unittest_generate_values(size_t entries) {
char **values = mallocz(sizeof(char *) * entries);
for(size_t i = 0; i < entries ;i++) {
char buf[25 + 1] = "";
snprintfz(buf, sizeof(buf) - 1, "value-%zu-0987654321.%zu%%^&*(),. \t !@#$/[]{}\\|~`", i, entries / 2 + i);
snprintfz(buf, sizeof(buf), "value-%zu-0987654321.%zu%%^&*(),. \t !@#$/[]{}\\|~`", i, entries / 2 + i);
values[i] = strdupz(buf);
}
return values;
@@ -2646,7 +2650,7 @@ static char **dictionary_unittest_generate_values(size_t entries) {
static size_t dictionary_unittest_set_clone(DICTIONARY *dict, char **names, char **values, size_t entries) {
size_t errors = 0;
for(size_t i = 0; i < entries ;i++) {
size_t vallen = strlen(values[i]) + 1;
size_t vallen = strlen(values[i]);
char *val = (char *)dictionary_set(dict, names[i], values[i], vallen);
if(val == values[i]) { fprintf(stderr, ">>> %s() returns reference to value\n", __FUNCTION__); errors++; }
if(!val || memcmp(val, values[i], vallen) != 0) { fprintf(stderr, ">>> %s() returns invalid value\n", __FUNCTION__); errors++; }
@@ -2673,7 +2677,7 @@ static size_t dictionary_unittest_set_null(DICTIONARY *dict, char **names, char
static size_t dictionary_unittest_set_nonclone(DICTIONARY *dict, char **names, char **values, size_t entries) {
size_t errors = 0;
for(size_t i = 0; i < entries ;i++) {
size_t vallen = strlen(values[i]) + 1;
size_t vallen = strlen(values[i]);
char *val = (char *)dictionary_set(dict, names[i], values[i], vallen);
if(val != values[i]) { fprintf(stderr, ">>> %s() returns invalid pointer to value\n", __FUNCTION__); errors++; }
}
@@ -2683,7 +2687,7 @@ static size_t dictionary_unittest_get_clone(DICTIONARY *dict, char **names, char **values, size_t entries) {
static size_t dictionary_unittest_get_clone(DICTIONARY *dict, char **names, char **values, size_t entries) {
size_t errors = 0;
for(size_t i = 0; i < entries ;i++) {
size_t vallen = strlen(values[i]) + 1;
size_t vallen = strlen(values[i]);
char *val = (char *)dictionary_get(dict, names[i]);
if(val == values[i]) { fprintf(stderr, ">>> %s() returns reference to value\n", __FUNCTION__); errors++; }
if(!val || memcmp(val, values[i], vallen) != 0) { fprintf(stderr, ">>> %s() returns invalid value\n", __FUNCTION__); errors++; }
@@ -2751,7 +2755,7 @@ static size_t dictionary_unittest_reset_clone(DICTIONARY *dict, char **names, ch
// set the name as value too
size_t errors = 0;
for(size_t i = 0; i < entries ;i++) {
size_t vallen = strlen(names[i]) + 1;
size_t vallen = strlen(names[i]);
char *val = (char *)dictionary_set(dict, names[i], names[i], vallen);
if(val == names[i]) { fprintf(stderr, ">>> %s() returns reference to value\n", __FUNCTION__); errors++; }
if(!val || memcmp(val, names[i], vallen) != 0) { fprintf(stderr, ">>> %s() returns invalid value\n", __FUNCTION__); errors++; }
@@ -2764,7 +2768,7 @@ static size_t dictionary_unittest_reset_nonclone(DICTIONARY *dict, char **names,
// set the name as value too
size_t errors = 0;
for(size_t i = 0; i < entries ;i++) {
size_t vallen = strlen(names[i]) + 1;
size_t vallen = strlen(names[i]);
char *val = (char *)dictionary_set(dict, names[i], names[i], vallen);
if(val != names[i]) { fprintf(stderr, ">>> %s() returns invalid pointer to value\n", __FUNCTION__); errors++; }
if(!val) { fprintf(stderr, ">>> %s() returns invalid value\n", __FUNCTION__); errors++; }
@@ -2776,7 +2780,7 @@ static size_t dictionary_unittest_reset_dont_overwrite_nonclone(DICTIONARY *dict
// set the name as value too
size_t errors = 0;
for(size_t i = 0; i < entries ;i++) {
size_t vallen = strlen(names[i]) + 1;
size_t vallen = strlen(names[i]);
char *val = (char *)dictionary_set(dict, names[i], names[i], vallen);
if(val != values[i]) { fprintf(stderr, ">>> %s() returns invalid pointer to value\n", __FUNCTION__); errors++; }
}
@@ -3253,13 +3257,13 @@ static void *unittest_dict_thread(void *arg) {
char buf [256 + 1];
for (int i = 0; i < 1000; i++) {
snprintfz(buf, sizeof(buf) - 1, "del/flush test %d", i);
snprintfz(buf, sizeof(buf), "del/flush test %d", i);
dictionary_set(tu->dict, buf, NULL, 0);
tu->stats.ops.inserts++;
}
for (int i = 0; i < 1000; i++) {
snprintfz(buf, sizeof(buf) - 1, "del/flush test %d", i);
snprintfz(buf, sizeof(buf), "del/flush test %d", i);
dictionary_del(tu->dict, buf);
tu->stats.ops.deletes++;
}
@@ -3392,7 +3396,7 @@ static void *unittest_dict_master_thread(void *arg) {
while(!__atomic_load_n(&tv->join, __ATOMIC_RELAXED)) {
if(!item)
item = dictionary_set_and_acquire_item(tv->master, "ITEM1", "123", strlen("123") + 1);
item = dictionary_set_and_acquire_item(tv->master, "ITEM1", "123", strlen("123"));
if(__atomic_load_n(&tv->item_master, __ATOMIC_RELAXED) != NULL) {
dictionary_acquired_item_release(tv->master, item);

View file

@@ -1,188 +0,0 @@
# Netdata Dynamic Configuration
The purpose of Netdata Dynamic Configuration is to allow select Netdata plugins and options to be configured through the Netdata API and, by extension, through the UI.
## HTTP API documentation
### Summary API
For a summary of all jobs and their statuses (for all children that stream to the parent), use the following endpoints:
| Method | Endpoint | Description |
|:-------:|-------------------------------|------------------------------------------------------------|
| **GET** | `api/v2/job_statuses` | list of Jobs |
| **GET** | `api/v2/job_statuses?grouped` | list of Jobs (hierarchical, grouped by host/plugin/module) |
### Dyncfg API
### Top level
| Method | Endpoint | Description |
|:-------:|------------------|-----------------------------------------|
| **GET** | `/api/v2/config` | registered Plugins (sent DYNCFG_ENABLE) |
### Plugin level
| Method | Endpoint | Description |
|:-------:|-----------------------------------|------------------------------|
| **GET** | `/api/v2/config/[plugin]` | Plugin config |
| **PUT** | `/api/v2/config/[plugin]` | update Plugin config |
| **GET** | `/api/v2/config/[plugin]/modules` | Modules registered by Plugin |
| **GET** | `/api/v2/config/[plugin]/schema` | Plugin config schema |
### Module level
| Method | Endpoint | Description |
|:-------:|-----------------------------------------------|---------------------------|
| **GET** | `/api/v2/config/[plugin]/[module]` | Module config |
| **PUT** | `/api/v2/config/[plugin]/[module]` | update Module config |
| **GET** | `/api/v2/config/[plugin]/[module]/jobs` | Jobs registered by Module |
| **GET** | `/api/v2/config/[plugin]/[module]/job_schema` | Job config schema |
| **GET** | `/api/v2/config/[plugin]/[module]/schema` | Module config schema |
### Job level - only for modules where `module_type == job_array`
| Method | Endpoint | Description |
|:----------:|------------------------------------------|--------------------------------|
| **GET** | `/api/v2/config/[plugin]/[module]/[job]` | Job config |
| **PUT** | `/api/v2/config/[plugin]/[module]/[job]` | update Job config |
| **POST** | `/api/v2/config/[plugin]/[module]/[job]` | create Job |
| **DELETE** | `/api/v2/config/[plugin]/[module]/[job]` | delete Job (created by Dyncfg) |
## Internal Plugins API
TBD
## External Plugins API
### Commands plugins can use
#### DYNCFG_ENABLE
A plugin signals to the agent its ability to use the new dynamic configuration, and the name it wishes to use, by sending:
```
DYNCFG_ENABLE [{PLUGIN_NAME}]
```
This can be sent only once in the lifetime of the plugin (at startup or later); sending it multiple times is considered a protocol violation and the plugin might get terminated.

After this command is sent, the plugin has to be ready to accept all the new commands/keywords related to dynamic configuration (this command lets the agent know that the plugin is dyncfg capable and wishes to use the dyncfg functionality).
#### DYNCFG_RESET
Sending this will reset the agent's internal state, treating it as a fresh `DYNCFG_ENABLE`.
```
DYNCFG_RESET
```
#### DYNCFG_REGISTER_MODULE
```
DYNCFG_REGISTER_MODULE {MODULE_NAME} {MODULE_TYPE}
```
A module has to choose one of the following types at registration:
- `single` - the module itself has configuration but does not accept any jobs *(this is useful mainly for internal configurable Netdata components, like the web server)*
- `job_array` - the module itself **can** *(but does not have to)* have configuration, and it has an array of jobs which can be added, modified and deleted. **This is what a plugin developer needs in most cases.**
After a module has been registered, the agent can call `set_module_config`, `get_module_config` and `get_module_config_schema`.
When `MODULE_TYPE` is `job_array`, the agent may also send `set_job_config`, `get_job_config` and `get_job_config_schema`.
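For example, a collector with an array of configurable jobs would typically register itself like this (the module name `example_module` is illustrative):
```
DYNCFG_REGISTER_MODULE example_module job_array
```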
#### DYNCFG_REGISTER_JOB
The plugin can use `DYNCFG_REGISTER_JOB` to register its own configuration jobs. It should not register jobs created via DYNCFG (if it does, the agent will shut down the plugin).
```
DYNCFG_REGISTER_JOB {MODULE_NAME} {JOB_NAME} {JOB_TYPE} {FLAGS}
```
Where:
- `MODULE_NAME` is the name of the module.
- `JOB_NAME` is the name of the job.
- `JOB_TYPE` is either `stock` or `autodiscovered`.
- `FLAGS`, just send zero.
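As an illustration (all names hypothetical), a plugin announcing a job it discovered on its own would send:
```
DYNCFG_REGISTER_JOB example_module my_postgres_1 autodiscovered 0
```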
#### REPORT_JOB_STATUS
```
REPORT_JOB_STATUS {MODULE_NAME} {JOB_NAME} {STATUS} {STATE} ["REASON"]
```
Note that the REASON parameter is optional and can be omitted entirely (for example, when the state is OK there is no need to send any reason).
Where:
- `MODULE_NAME` is the name of the module.
- `JOB_NAME` is the name of the job.
- `STATUS` is one of `stopped`, `running`, or `error`.
- `STATE`, just send zero.
- `REASON` is a message describing the status. If you don't want to send any reason string, it is preferable to omit this parameter altogether (as opposed to sending an empty string `""`).
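For instance (names hypothetical), a healthy job and a failing job would be reported as:
```
REPORT_JOB_STATUS example_module my_postgres_1 running 0
REPORT_JOB_STATUS example_module my_postgres_1 error 0 "connection refused"
```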
### Commands plugins must serve
Once a plugin calls `DYNCFG_ENABLE`, it must be able to handle these calls:
| function | parameters | prerequisites | request payload | response payload |
|:--------:|:----------:|:-------------:|:---------------:|:----------------:|
| `set_plugin_config` | none | `DYNCFG_ENABLE` | plugin configuration | none |
| `get_plugin_config` | none | `DYNCFG_ENABLE` | none | plugin configuration |
| `get_plugin_config_schema` | none | `DYNCFG_ENABLE` | none | plugin configuration schema |
| `set_module_config` | `module_name` | `DYNCFG_REGISTER_MODULE` | module configuration | none |
| `get_module_config` | `module_name` | `DYNCFG_REGISTER_MODULE` | none | module configuration |
| `get_module_config_schema` | `module_name` | `DYNCFG_REGISTER_MODULE` | none | module configuration schema |
| `set_job_config` | `module_name`, `job_name` | `DYNCFG_REGISTER_MODULE` | job configuration | none |
| `get_job_config` | `module_name`, `job_name` | `DYNCFG_REGISTER_MODULE` | none | job configuration |
| `get_job_config_schema` | `module_name`, `job_name` | `DYNCFG_REGISTER_MODULE` | none | job configuration schema |
All of them work like this:
If the request payload is `none`, then the request looks like this:
```bash
FUNCTION {TRANSACTION_UUID} {TIMEOUT_SECONDS} "{function} {parameters}"
```
When there is a payload, the request looks like this:
```bash
FUNCTION_PAYLOAD {TRANSACTION_UUID} {TIMEOUT_SECONDS} "{function} {parameters}"
<payload>
FUNCTION_PAYLOAD_END
```
In all cases, the response is like this:
```bash
FUNCTION_RESULT_BEGIN {TRANSACTION_UUID} {HTTP_RESPONSE_CODE} "{CONTENT_TYPE}" {EXPIRATION_TIMESTAMP}
<payload>
FUNCTION_RESULT_END
```
Where:
- `TRANSACTION_UUID` is the same UUID received with the request.
- `HTTP_RESPONSE_CODE` is either `0` (rejected) or `1` (accepted).
- `CONTENT_TYPE` should reflect the `payload` returned.
- `EXPIRATION_TIMESTAMP` can be zero.
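Putting it together, a hypothetical `get_job_config` round trip (the UUID, names and payload are illustrative) could look like this:
```bash
FUNCTION 9f47a2b1-1111-4222-8333-123456789abc 60 "get_job_config example_module my_postgres_1"

FUNCTION_RESULT_BEGIN 9f47a2b1-1111-4222-8333-123456789abc 1 "application/json" 0
{ "update_every": 5 }
FUNCTION_RESULT_END
```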
## DYNCFG with streaming
When the above commands are transferred through streaming, the `plugin_name` is additionally prefixed as the first parameter. This is done to allow routing to the appropriate plugin at the child.
As a plugin developer you don't need to concern yourself with this detail, as that parameter is stripped automatically when the request is handed to the plugin *(and added when it is sent through streaming)*.
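So, a request that a child's plugin receives as `"get_job_config example_module my_job"` travels between the agents as (the plugin name is illustrative):
```bash
FUNCTION {TRANSACTION_UUID} {TIMEOUT_SECONDS} "example_plugin get_job_config example_module my_job"
```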

File diff suppressed because it is too large

View file

@@ -1,237 +0,0 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef DYN_CONF_H
#define DYN_CONF_H
#include "../libnetdata.h"
#define FUNCTION_NAME_GET_PLUGIN_CONFIG "get_plugin_config"
#define FUNCTION_NAME_GET_PLUGIN_CONFIG_SCHEMA "get_plugin_config_schema"
#define FUNCTION_NAME_GET_MODULE_CONFIG "get_module_config"
#define FUNCTION_NAME_GET_MODULE_CONFIG_SCHEMA "get_module_config_schema"
#define FUNCTION_NAME_GET_JOB_CONFIG "get_job_config"
#define FUNCTION_NAME_GET_JOB_CONFIG_SCHEMA "get_job_config_schema"
#define FUNCTION_NAME_SET_PLUGIN_CONFIG "set_plugin_config"
#define FUNCTION_NAME_SET_MODULE_CONFIG "set_module_config"
#define FUNCTION_NAME_SET_JOB_CONFIG "set_job_config"
#define FUNCTION_NAME_DELETE_JOB "delete_job"
#define DYNCFG_MAX_WORDS 5
#define DYNCFG_VFNC_RET_CFG_ACCEPTED (1)
enum module_type {
MOD_TYPE_UNKNOWN = 0,
MOD_TYPE_ARRAY,
MOD_TYPE_SINGLE
};
static inline enum module_type str2_module_type(const char *type_name)
{
if (strcmp(type_name, "job_array") == 0)
return MOD_TYPE_ARRAY;
else if (strcmp(type_name, "single") == 0)
return MOD_TYPE_SINGLE;
return MOD_TYPE_UNKNOWN;
}
static inline const char *module_type2str(enum module_type type)
{
switch (type) {
case MOD_TYPE_ARRAY:
return "job_array";
case MOD_TYPE_SINGLE:
return "single";
default:
return "unknown";
}
}
struct dyncfg_config {
void *data;
size_t data_size;
};
typedef struct dyncfg_config dyncfg_config_t;
struct configurable_plugin;
struct module;
enum job_status {
JOB_STATUS_UNKNOWN = 0, // State used until plugin reports first status
JOB_STATUS_STOPPED,
JOB_STATUS_RUNNING,
JOB_STATUS_ERROR
};
static inline enum job_status str2job_state(const char *state_name) {
if (strcmp(state_name, "stopped") == 0)
return JOB_STATUS_STOPPED;
else if (strcmp(state_name, "running") == 0)
return JOB_STATUS_RUNNING;
else if (strcmp(state_name, "error") == 0)
return JOB_STATUS_ERROR;
return JOB_STATUS_UNKNOWN;
}
const char *job_status2str(enum job_status status);
enum set_config_result {
SET_CONFIG_ACCEPTED = 0,
SET_CONFIG_REJECTED,
SET_CONFIG_DEFFER
};
typedef uint32_t dyncfg_job_flg_t;
enum job_flags {
JOB_FLG_PS_LOADED = 1 << 0, // PS abbr. Persistent Storage
JOB_FLG_PLUGIN_PUSHED = 1 << 1, // got it from plugin (e.g. autodiscovered job)
JOB_FLG_STREAMING_PUSHED = 1 << 2, // got it through streaming
JOB_FLG_USER_CREATED = 1 << 3, // user created this job during agent runtime
};
enum job_type {
JOB_TYPE_UNKNOWN = 0,
JOB_TYPE_STOCK = 1,
JOB_TYPE_USER = 2,
JOB_TYPE_AUTODISCOVERED = 3,
};
static inline const char* job_type2str(enum job_type type)
{
switch (type) {
case JOB_TYPE_STOCK:
return "stock";
case JOB_TYPE_USER:
return "user";
case JOB_TYPE_AUTODISCOVERED:
return "autodiscovered";
case JOB_TYPE_UNKNOWN:
default:
return "unknown";
}
}
static inline enum job_type dyncfg_str2job_type(const char *type_name)
{
if (strcmp(type_name, "stock") == 0)
return JOB_TYPE_STOCK;
else if (strcmp(type_name, "user") == 0)
return JOB_TYPE_USER;
else if (strcmp(type_name, "autodiscovered") == 0)
return JOB_TYPE_AUTODISCOVERED;
error_report("Unknown job type: %s", type_name);
return JOB_TYPE_UNKNOWN;
}
struct job
{
const char *name;
enum job_type type;
struct module *module;
pthread_mutex_t lock;
// lock protects only the fields below (which are modified during the job's existence)
// others are static during lifetime of job
int dirty; // this relates to rrdpush, true if parent has different data than us
// state reported by plugin
usec_t last_state_update;
enum job_status status; // reported by plugin, enum as this has to be interpreted by UI
int state; // code reported by plugin which can mean anything plugin wants
char *reason; // reported by plugin, can be NULL (optional)
dyncfg_job_flg_t flags;
};
struct module
{
pthread_mutex_t lock;
char *name;
enum module_type type;
struct configurable_plugin *plugin;
// module config
enum set_config_result (*set_config_cb)(void *usr_ctx, const char *plugin_name, const char *module_name, dyncfg_config_t *cfg);
dyncfg_config_t (*get_config_cb)(void *usr_ctx, const char *plugin_name, const char *module_name);
dyncfg_config_t (*get_config_schema_cb)(void *usr_ctx, const char *plugin_name, const char *module_name);
void *config_cb_usr_ctx;
DICTIONARY *jobs;
// jobs config
dyncfg_config_t (*get_job_config_cb)(void *usr_ctx, const char *plugin_name, const char *module_name, const char *job_name);
dyncfg_config_t (*get_job_config_schema_cb)(void *usr_ctx, const char *plugin_name, const char *module_name);
enum set_config_result (*set_job_config_cb)(void *usr_ctx, const char *plugin_name, const char *module_name, const char *job_name, dyncfg_config_t *cfg);
enum set_config_result (*delete_job_cb)(void *usr_ctx, const char *plugin_name, const char *module_name, const char *job_name);
void *job_config_cb_usr_ctx;
};
struct configurable_plugin {
pthread_mutex_t lock;
char *name;
DICTIONARY *modules;
const char *schema;
dyncfg_config_t (*get_config_cb)(void *usr_ctx, const char *plugin_name);
dyncfg_config_t (*get_config_schema_cb)(void *usr_ctx, const char *plugin_name);
enum set_config_result (*set_config_cb)(void *usr_ctx, const char *plugin_name, dyncfg_config_t *cfg);
void *cb_usr_ctx; // context for all callbacks (split if needed in future)
};
// API to be used by plugins
const DICTIONARY_ITEM *register_plugin(DICTIONARY *plugins_dict, struct configurable_plugin *plugin, bool localhost);
void unregister_plugin(DICTIONARY *plugins_dict, const DICTIONARY_ITEM *plugin);
int register_module(DICTIONARY *plugins_dict, struct configurable_plugin *plugin, struct module *module, bool localhost);
int register_job(DICTIONARY *plugins_dict, const char *plugin_name, const char *module_name, const char *job_name, enum job_type job_type, dyncfg_job_flg_t flags, int ignore_existing);
const DICTIONARY_ITEM *report_job_status_acq_lock(DICTIONARY *plugins_dict, const DICTIONARY_ITEM **plugin_acq_item, DICTIONARY **job_dict, const char *plugin_name, const char *module_name, const char *job_name, enum job_status status, int status_code, char *reason);
void dyn_conf_store_config(const char *function, const char *payload, struct configurable_plugin *plugin);
void unlink_job(const char *plugin_name, const char *module_name, const char *job_name);
void delete_job(struct configurable_plugin *plugin, const char *module_name, const char *job_name);
void delete_job_pname(DICTIONARY *plugins_dict, const char *plugin_name, const char *module_name, const char *job_name);
// API to be used by the web server(s)
json_object *get_list_of_plugins_json(DICTIONARY *plugins_dict);
struct configurable_plugin *get_plugin_by_name(DICTIONARY *plugins_dict, const char *name);
json_object *get_list_of_modules_json(struct configurable_plugin *plugin);
struct module *get_module_by_name(struct configurable_plugin *plugin, const char *module_name);
json_object *job2json(struct job *job);
// helper struct to keep the interface between the internal web server and h2o the same
struct uni_http_response {
int status;
char *content;
size_t content_length;
HTTP_CONTENT_TYPE content_type;
void (*content_free)(void *);
};
struct uni_http_response dyn_conf_process_http_request(DICTIONARY *plugins_dict, int method, const char *plugin, const char *module, const char *job_id, void *payload, size_t payload_size);
// API to be used by main netdata process, initialization and destruction etc.
int dyn_conf_init(void);
void freez_dyncfg(void *ptr);
#define dyncfg_dictionary_create() dictionary_create(DICT_OPTION_VALUE_LINK_DONT_CLONE)
void plugin_del_cb(const DICTIONARY_ITEM *item, void *value, void *data);
void *dyncfg_main(void *in);
#define DYNCFG_FUNCTION_TYPE_REGULAR (1 << 0)
#define DYNCFG_FUNCTION_TYPE_PAYLOAD (1 << 1)
#define DYNCFG_FUNCTION_TYPE_GET (1 << 2)
#define DYNCFG_FUNCTION_TYPE_SET (1 << 3)
#define DYNCFG_FUNCTION_TYPE_DELETE (1 << 4)
#define DYNCFG_FUNCTION_TYPE_ALL \
(DYNCFG_FUNCTION_TYPE_REGULAR | DYNCFG_FUNCTION_TYPE_PAYLOAD | DYNCFG_FUNCTION_TYPE_GET | DYNCFG_FUNCTION_TYPE_SET | DYNCFG_FUNCTION_TYPE_DELETE)
bool is_dyncfg_function(const char *function_name, uint8_t type);
#endif //DYN_CONF_H
@ -1,22 +0,0 @@
{
"http_endpoints": {
"parent": {
"host": "127.0.0.1",
"mguid": null,
"port": 20001,
"ssl": false
},
"child": {
"host": "127.0.0.1",
"mguid": "3bc2f7de-1445-11ee-9ed7-3c7c3f21784c",
"port": 19999,
"ssl": false
}
},
"global": {
"test_plugin_name": "external_plugin",
"test_array_module_name": "module_of_the_future",
"test_single_module_name": "module_of_the_future_single_type",
"test_job_name": "fixed_job"
}
}
@ -1,192 +0,0 @@
class ParentChildTest
@@plugin_cfg = <<~HEREDOC
{ "test" : "true" }
HEREDOC
@@plugin_cfg2 = <<~HEREDOC
{ "asdfgh" : "asdfgh" }
HEREDOC
@@job_cfg = <<~HEREDOC
{ "i am newly created job" : "true" }
HEREDOC
def initialize
@parent = $config[:http_endpoints][:parent]
@child = $config[:http_endpoints][:child]
@plugin = $config[:global][:test_plugin_name]
@arry_mod = $config[:global][:test_array_module_name]
@single_mod = $config[:global][:test_single_module_name]
@test_job = $config[:global][:test_job_name]
end
def check_test_plugin_modules_list(host, child = nil)
rc = DynCfgHttpClient.get_plugin_module_list(host, @plugin, child)
assert_eq(rc.code, 200, "as HTTP code for get_module_list request on plugin \"#{@plugin}\"")
modules = nil
assert_nothing_raised do
modules = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(modules, :modules)
assert_eq(modules[:modules].count, 2, "as number of modules in plugin \"#{@plugin}\"")
modules[:modules].each do |m|
assert_has_key?(m, :name)
assert_has_key?(m, :type)
assert_is_one_of(m[:type], "job_array", "single")
end
assert_eq_str(modules[:modules][0][:name], @arry_mod, "name of first module in plugin \"#{@plugin}\"")
assert_eq_str(modules[:modules][1][:name], @single_mod, "name of second module in plugin \"#{@plugin}\"")
end
def run
TEST_SUITE("Parent/Child plugin config")
TEST("parent/child/get_plugin_list", "Get child (hops:1) plugin list through parent")
plugins = DynCfgHttpClient.get_plugin_list(@parent, @child)
assert_eq(plugins.code, 200, "as HTTP code for get_plugin_list request")
assert_nothing_raised do
plugins = JSON.parse(plugins.parsed_response, symbolize_names: true)
end
assert_has_key?(plugins, :configurable_plugins)
assert_array_include?(plugins[:configurable_plugins], @plugin)
PASS()
TEST("parent/child/(set/get)plugin_config", "Set then get and compare child (hops:1) plugin config through parent")
rc = DynCfgHttpClient.set_plugin_config(@parent, @plugin, @@plugin_cfg, @child)
assert_eq(rc.code, 200, "as HTTP code for set_plugin_config request")
rc = DynCfgHttpClient.get_plugin_config(@parent, @plugin, @child)
assert_eq(rc.code, 200, "as HTTP code for get_plugin_config request")
assert_eq_str(rc.parsed_response.chomp!, @@plugin_cfg, "as plugin config")
# We do this twice with different configs to ensure first config was not loaded from persistent storage (from previous tests)
rc = DynCfgHttpClient.set_plugin_config(@parent, @plugin, @@plugin_cfg2, @child)
assert_eq(rc.code, 200, "as HTTP code for set_plugin_config request 2")
rc = DynCfgHttpClient.get_plugin_config(@parent, @plugin, @child)
assert_eq(rc.code, 200, "as HTTP code for get_plugin_config request 2")
assert_eq_str(rc.parsed_response.chomp!, @@plugin_cfg2, "set/get plugin config 2")
PASS()
TEST("child/get_plugin_config", "Get child (hops:0) plugin config and compare with what we got through parent (set_plugin_config from previous test)")
rc = DynCfgHttpClient.get_plugin_config(@child, @plugin, nil)
assert_eq(rc.code, 200, "as HTTP code for get_plugin_config request")
assert_eq_str(rc.parsed_response.chomp!, @@plugin_cfg2.chomp, "as plugin config")
PASS()
TEST("parent/child/plugin_module_list", "Get child (hops:1) plugin module list through parent and check its contents")
check_test_plugin_modules_list(@parent, @child)
PASS()
TEST("child/plugin_module_list", "Get child (hops:0) plugin module list directly and check its contents")
check_test_plugin_modules_list(@child, nil)
PASS()
TEST("parent/child/module/jobs", "Get list of jobs from child (hops:1) through parent and check its contents, check job updates")
rc = DynCfgHttpClient.get_job_list(@parent, @plugin, @arry_mod, @child)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
new_job = jobs[:jobs].find {|i| i[:name] == @test_job}
assert_not_nil(new_job)
assert_has_key?(new_job, :status)
assert_not_eq_str(new_job[:status], "unknown", "job status is other than unknown")
assert_has_key?(new_job, :flags)
assert_array_include?(new_job[:flags], "JOB_FLG_STREAMING_PUSHED")
PASS()
TEST("child/module/jobs", "Get list of jobs directly from child (hops:0) and check its contents, check job updates")
rc = DynCfgHttpClient.get_job_list(@child, @plugin, @arry_mod, nil)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
new_job = jobs[:jobs].find {|i| i[:name] == @test_job}
assert_not_nil(new_job)
assert_has_key?(new_job, :status)
assert_not_eq_str(new_job[:status], "unknown", "job status is other than unknown")
assert_has_key?(new_job, :flags)
assert_array_not_include?(new_job[:flags], "JOB_FLG_STREAMING_PUSHED") # this is plugin directly at child so it should not show this flag
PASS()
TEST("parent/child/single_module/jobs", "Attempt getting list of jobs from child (hops:1) through parent on single module. Check it fails properly")
rc = DynCfgHttpClient.get_job_list(@parent, @plugin, @single_mod, @child)
assert_eq(rc.code, 400, "as HTTP code for get_jobs request")
assert_eq_str(rc.parsed_response, '400 - this module is not array type', "as HTTP code for get_jobs request on single module")
PASS()
created_job = SecureRandom.uuid
TEST("parent/child/module/cr_del_job", "Create and delete job on child (hops:1) through parent")
# create new job
rc = DynCfgHttpClient.create_job(@parent, @plugin, @arry_mod, created_job, @@job_cfg, @child)
assert_eq_http_code(rc, 200, "as HTTP code for create_job request")
# check this job is in job list @parent
rc = DynCfgHttpClient.get_job_list(@parent, @plugin, @arry_mod, @child)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
new_job = jobs[:jobs].find {|i| i[:name] == created_job}
assert_not_nil(new_job)
# check this job is in job list @child
rc = DynCfgHttpClient.get_job_list(@child, @plugin, @arry_mod, nil)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
new_job = jobs[:jobs].find {|i| i[:name] == created_job}
assert_not_nil(new_job)
# check we can get job config back
rc = DynCfgHttpClient.get_job_config(@parent, @plugin, @arry_mod, created_job, @child)
assert_eq(rc.code, 200, "as HTTP code for get_job_config request")
assert_eq_str(rc.parsed_response.chomp!, @@job_cfg, "as job config")
# delete job
rc = DynCfgHttpClient.delete_job(@parent, @plugin, @arry_mod, created_job, @child)
assert_eq(rc.code, 200, "as HTTP code for delete_job request")
# Check it is not in the parent's job list anymore
rc = DynCfgHttpClient.get_job_list(@parent, @plugin, @arry_mod, @child)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
new_job = jobs[:jobs].find {|i| i[:name] == created_job}
assert_nil(new_job)
# Check it is not in the child's job list anymore
rc = DynCfgHttpClient.get_job_list(@child, @plugin, @arry_mod, nil)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
new_job = jobs[:jobs].find {|i| i[:name] == created_job}
assert_nil(new_job)
PASS()
TEST("parent/child/module/del_undeletable_job", "Try deleting a job the child rejects, check the failure case works (hops:1)")
# test if plugin rejects job deletion the job still remains in list as it should
rc = DynCfgHttpClient.delete_job(@parent, @plugin, @arry_mod, @test_job, @child)
assert_eq(rc.code, 500, "as HTTP code for delete_job request")
rc = DynCfgHttpClient.get_job_list(@parent, @plugin, @arry_mod, @child)
assert_eq(rc.code, 200, "as HTTP code for get_jobs request")
jobs = nil
assert_nothing_raised do
jobs = JSON.parse(rc.parsed_response, symbolize_names: true)
end
assert_has_key?(jobs, :jobs)
job = jobs[:jobs].find {|i| i[:name] == @test_job}
assert_not_nil(job)
PASS()
end
end
ParentChildTest.new.run()
@ -1,266 +0,0 @@
#!/usr/bin/env ruby
require 'json'
require 'httparty'
require 'pastel'
require 'securerandom'
ARGV.length == 1 or raise "Usage: #{$0} <config file>"
config_file = ARGV[0]
File.exist?(config_file) or raise "File not found: #{config_file}"
$config = JSON.parse(File.read(config_file), symbolize_names: true)
$plugin_name = $config[:global][:test_plugin_name]
$pastel = Pastel.new
class TestRunner
attr_reader :stats
def initialize
@stats = {
:suites => 0,
:tests => 0,
:assertions => 0
}
@test = nil
end
def add_assertion()
@stats[:assertions] += 1
end
def FAIL(msg, exception = nil, loc = nil)
puts $pastel.red.bold(" ✕ FAIL")
STDERR.print " "
if loc
STDERR.print $pastel.yellow("@#{loc.path}:#{loc.lineno}: ")
else
STDERR.print $pastel.yellow("@#{caller_locations(1, 1).first.path}:#{caller_locations(1, 1).first.lineno}: ")
end
STDERR.puts msg
STDERR.puts exception.full_message(:highlight => true) if exception
STDERR.puts $pastel.yellow(" Backtrace:")
caller.each do |line|
STDERR.puts " #{line}"
end
exit 1
end
def PASS()
STDERR.puts $pastel.green.bold(" ✓ PASS")
@stats[:tests] += 1
@test = nil
end
def TEST_SUITE(name)
puts $pastel.bold("• TEST SUITE: \"#{name}\"")
@stats[:suites] += 1
end
def assert_no_test_running()
unless @test.nil?
STDERR.puts $pastel.red("\nFATAL: Test \"#{@test}\" did not call PASS() or FAIL()!")
exit 1
end
end
def TEST(name, description = nil)
assert_no_test_running()
@test = name
col = 0
txt = " ├─ T: #{name} "
col += txt.length
print $pastel.bold(txt)
tab = 50
rem = tab - (col % tab)
rem.times do putc ' ' end
col += rem
if (description)
txt = " - #{description} "
col += txt.length
print txt
tab = 180
rem = tab - (col % tab)
rem.times do putc '.' end
end
end
def FINALIZE()
assert_no_test_running()
end
end
$test_runner = TestRunner.new
def FAIL(msg, exception = nil, loc = nil)
$test_runner.FAIL(msg, exception, loc)
end
def PASS()
$test_runner.PASS()
end
def TEST_SUITE(name)
$test_runner.TEST_SUITE(name)
end
def TEST(name, description = nil)
$test_runner.TEST(name, description)
end
def assert_eq(got, expected, msg = nil)
unless got == expected
FAIL("Expected #{expected}, got #{got} #{msg ? "(#{msg})" : ""}", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_eq_http_code(got, expected, msg = nil)
unless got.code == expected
FAIL("Expected #{expected}, got #{got}. Server \"#{got.parsed_response}\" #{msg ? "(#{msg})" : ""}", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_eq_str(got, expected, msg = nil)
unless got == expected
FAIL("Strings do not match #{msg ? "(#{msg})" : ""}", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_not_eq_str(got, expected, msg = nil)
unless got != expected
FAIL("Strings should not match #{msg ? "(#{msg})" : ""}", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_nothing_raised()
begin
yield
rescue Exception => e
FAIL("Unexpected exception of type #{e.class} raised. Msg: \"#{e.message}\"", e, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_has_key?(hash, key)
unless hash.has_key?(key)
FAIL("Expected key \"#{key}\" in hash", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_array_include?(array, value)
unless array.include?(value)
FAIL("Expected array to include \"#{value}\"", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_array_not_include?(array, value)
if array.include?(value)
FAIL("Expected array to not include \"#{value}\"", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_is_one_of(value, *values)
unless values.include?(value)
FAIL("Expected value to be one of #{values.join(", ")}", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_not_nil(value)
if value.nil?
FAIL("Expected value to not be nil", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
def assert_nil(value)
unless value.nil?
FAIL("Expected value to be nil", nil, caller_locations(1, 1).first)
end
$test_runner.add_assertion()
end
class DynCfgHttpClient
def self.protocol(cfg)
return cfg[:ssl] ? 'https://' : 'http://'
end
def self.url_base(host)
return "#{protocol(host)}#{host[:host]}:#{host[:port]}"
end
def self.get_url_cfg_base(host, child = nil)
url = url_base(host)
url += "/host/#{child[:mguid]}" if child
url += "/api/v2/config"
return url
end
def self.get_url_cfg_plugin(host, plugin, child = nil)
return get_url_cfg_base(host, child) + '/' + plugin
end
def self.get_url_cfg_module(host, plugin, mod, child = nil)
return get_url_cfg_plugin(host, plugin, child) + '/' + mod
end
def self.get_url_cfg_job(host, plugin, mod, job_id, child = nil)
return get_url_cfg_module(host, plugin, mod, child) + "/#{job_id}"
end
def self.get_plugin_list(host, child = nil)
begin
return HTTParty.get(get_url_cfg_base(host, child), verify: false, format: :plain)
rescue => e
FAIL(e.message, e)
end
end
def self.get_plugin_config(host, plugin, child = nil)
begin
return HTTParty.get(get_url_cfg_plugin(host, plugin, child), verify: false)
rescue => e
FAIL(e.message, e)
end
end
def self.set_plugin_config(host, plugin, cfg, child = nil)
begin
return HTTParty.put(get_url_cfg_plugin(host, plugin, child), verify: false, body: cfg)
rescue => e
FAIL(e.message, e)
end
end
def self.get_plugin_module_list(host, plugin, child = nil)
begin
return HTTParty.get(get_url_cfg_plugin(host, plugin, child) + "/modules", verify: false, format: :plain)
rescue => e
FAIL(e.message, e)
end
end
def self.get_job_list(host, plugin, mod, child = nil)
begin
return HTTParty.get(get_url_cfg_module(host, plugin, mod, child) + "/jobs", verify: false, format: :plain)
rescue => e
FAIL(e.message, e)
end
end
def self.create_job(host, plugin, mod, job_id, job_cfg, child = nil)
begin
return HTTParty.post(get_url_cfg_job(host, plugin, mod, job_id, child), verify: false, body: job_cfg)
rescue => e
FAIL(e.message, e)
end
end
def self.delete_job(host, plugin, mod, job_id, child = nil)
begin
return HTTParty.delete(get_url_cfg_job(host, plugin, mod, job_id, child), verify: false)
rescue => e
FAIL(e.message, e)
end
end
def self.get_job_config(host, plugin, mod, job_id, child = nil)
begin
return HTTParty.get(get_url_cfg_job(host, plugin, mod, job_id, child), verify: false, format: :plain)
rescue => e
FAIL(e.message, e)
end
end
def self.set_job_config(host, plugin, mod, job_id, job_cfg, child = nil)
begin
return HTTParty.put(get_url_cfg_job(host, plugin, mod, job_id, child), verify: false, body: job_cfg)
rescue => e
FAIL(e.message, e)
end
end
end
require_relative 'sub_tests/test_parent_child.rb'
$test_runner.FINALIZE()
puts $pastel.green.bold("All tests passed!")
puts ("Total #{$test_runner.stats[:assertions]} assertions, #{$test_runner.stats[:tests]} tests in #{$test_runner.stats[:suites]} suites")
exit 0
@ -1,250 +0,0 @@
#!/usr/bin/env ruby
# bogus chart that we create just so there is at least one chart
CHART_TYPE = 'lines'
UPDATE_EVERY = 1
PRIORITY = 100000
CHART_NAME = 'number_of_processes'
DIMENSION_NAME = 'running'
$plugin_name = "external_plugin"
$plugin_version = "0.0.1"
$plugin_config = <<-HEREDOC
test_plugin_config
hableba hableba hableba
HEREDOC
$array_module_name = 'module_of_the_future'
$fixed_job_name = 'fixed_job'
$modules = {
$array_module_name => {
:type => :job_array,
:jobs => {
$fixed_job_name => {
:type => :fixed,
:config => <<-HEREDOC
fixed_job_config
HEREDOC
},
},
:config => <<-HEREDOC
module_of_the_future_config
HEREDOC
},
"module_of_the_future_single_type" => {
:type => :single,
:jobs => {},
:config => <<-HEREDOC
module_of_the_future_single_type_config
HEREDOC
}
}
def out(str)
$log.puts "2 NETDATA> #{str}"
$stdout.puts str
$stdout.flush
$log.flush
end
def log(str)
$log.puts "LOG > #{str}"
$log.flush
end
#TODO this is AI code, verify
def split_with_quotes(str)
result = []
current_word = ""
in_quotes = false
escaped = false
str.each_char do |char|
if char == '\\' && !escaped
escaped = true
next
end
if char == '"' && !escaped
in_quotes = !in_quotes
current_word << char
elsif char == ' ' && !in_quotes
result << current_word unless current_word.empty?
current_word = ""
else
current_word << char
end
escaped = false
end
result << current_word unless current_word.empty?
result
end
def print_startup_messages
out "DYNCFG_ENABLE #{$plugin_name}"
$modules.each do |name, module_config|
out "DYNCFG_REGISTER_MODULE #{name} #{module_config[:type]}"
end
out "CHART system.#{CHART_NAME} '' 'Number of running processes' 'processes' processes processes.#{CHART_NAME} #{CHART_TYPE} #{PRIORITY} #{UPDATE_EVERY}"
out "DIMENSION #{DIMENSION_NAME} '' absolute 1 1"
$modules.each do |mod_name, mod|
next unless mod[:type] == :job_array
mod[:jobs].each do |job_name, job|
next unless job[:type] == :fixed
out "DYNCFG_REGISTER_JOB #{mod_name} #{job_name} stock 0"
out "REPORT_JOB_STATUS #{$array_module_name} #{$fixed_job_name} running 0"
end
end
end
def function_result(txid, msg, result)
out "FUNCTION_RESULT_BEGIN #{txid} #{result} text/plain 5"
out msg
out "FUNCTION_RESULT_END"
end
def process_payload_function(params)
log "payload function #{params[:fncname]}, #{params[:fncparams]}"
fnc_name, mod_name, job_name = params[:fncparams]
case fnc_name
when 'set_plugin_config'
$plugin_config = params[:payload]
function_result(params[:txid], "plugin config set", 1)
when 'set_module_config'
mod = $modules[mod_name]
return function_result(params[:txid], "no such module", 0) if mod.nil?
mod[:config] = params[:payload]
function_result(params[:txid], "module config set", 1)
when 'set_job_config'
mod = $modules[mod_name]
return function_result(params[:txid], "no such module", 0) if mod.nil?
job = mod[:jobs][job_name]
if job.nil?
job = Hash.new
job[:type] = :dynamic
mod[:jobs][job_name] = job
end
job[:config] = params[:payload]
function_result(params[:txid], "job config set", 1)
end
end
def process_function(params)
log "normal function #{params[:fncname]}, #{params[:fncparams]}"
fnc_name, mod_name, job_name = params[:fncparams]
case fnc_name
when 'get_plugin_config'
function_result(params[:txid], $plugin_config, 1)
when 'get_module_config'
return function_result(params[:txid], "no such module", 0) unless $modules.has_key?(mod_name)
function_result(params[:txid], $modules[mod_name][:config], 1)
when 'get_job_config'
mod = $modules[mod_name]
return function_result(params[:txid], "no such module", 0) if mod.nil?
job = mod[:jobs][job_name]
return function_result(params[:txid], "no such job", 0) if job.nil?
function_result(params[:txid], job[:config], 1)
when 'delete_job'
mod = $modules[mod_name]
return function_result(params[:txid], "no such module", 0) if mod.nil?
job = mod[:jobs][job_name]
return function_result(params[:txid], "no such job", 0) if job.nil?
if job[:type] == :fixed
return function_result(params[:txid], "this job can't be deleted", 0)
else
mod[:jobs].delete(job_name)
function_result(params[:txid], "job deleted", 1)
end
end
end
$inflight_incoming = nil
def process_input(input)
words = split_with_quotes(input)
unless $inflight_incoming.nil?
if input == "FUNCTION_PAYLOAD_END"
log $inflight_incoming[:payload]
process_payload_function($inflight_incoming)
$inflight_incoming = nil
else
$inflight_incoming[:payload] << input
$inflight_incoming[:payload] << "\n"
end
return
end
case words[0]
when "FUNCTION", "FUNCTION_PAYLOAD"
params = {}
params[:command] = words[0]
params[:txid] = words[1]
params[:timeout] = words[2].to_i
params[:fncname] = words[3]
params[:fncname] = params[:fncname][1..-2] if params[:fncname].start_with?('"') && params[:fncname].end_with?('"')
if params[:command] == "FUNCTION_PAYLOAD"
$inflight_incoming = Hash.new
params[:fncparams] = split_with_quotes(params[:fncname])
params[:fncname] = params[:fncparams][0]
$inflight_incoming[:txid] = params[:txid]
$inflight_incoming[:fncname] = params[:fncname]
$inflight_incoming[:params] = params
$inflight_incoming[:fncparams] = params[:fncparams]
$inflight_incoming[:payload] = ""
else
params[:fncparams] = split_with_quotes(params[:fncname])
params[:fncname] = params[:fncparams][0]
process_function(params)
end
end
end
def read_and_output_metric
processes = `ps -e | wc -l`.to_i - 1 # -1 to exclude the header line
timestamp = Time.now.to_i
puts "BEGIN system.#{CHART_NAME}"
puts "SET #{DIMENSION_NAME} = #{processes}"
puts "END"
end
def the_main
$stderr.reopen("/tmp/test_plugin_err.log", "w")
$log = File.open("/tmp/test_plugin.log", "w")
$log.puts "Starting plugin"
print_startup_messages
$log.puts "init done"
$log.flush
last_metric_time = Time.now
loop do
time_since_last_metric = Time.now - last_metric_time
# If it's been more than 1 second since we collected metrics, collect them now
if time_since_last_metric >= 1
read_and_output_metric
last_metric_time = Time.now
end
# Use select to wait for input, but only wait up to the time remaining until we need to collect metrics again
remaining_time = [1 - time_since_last_metric, 0].max
if select([$stdin], nil, nil, remaining_time)
input = $stdin.gets
next if input.class != String
input.chomp!
$log.puts "RAW INPUT< #{input}"
$log.flush
process_input(input)
end
end
end
the_main if __FILE__ == $PROGRAM_NAME
@ -2,7 +2,7 @@
#include "functions_evloop.h"
#define MAX_FUNCTION_PARAMETERS 1024
static void functions_evloop_config_cb(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *payload, const char *source, void *data);
struct functions_evloop_worker_job {
bool used;
@ -12,13 +12,26 @@ struct functions_evloop_worker_job {
char *cmd;
const char *transaction;
time_t timeout;
BUFFER *payload;
const char *source;
functions_evloop_worker_execute_t cb;
void *cb_data;
};
static void worker_job_cleanup(struct functions_evloop_worker_job *j) {
freez((void *)j->cmd);
freez((void *)j->transaction);
freez((void *)j->source);
buffer_free(j->payload);
}
struct rrd_functions_expectation {
const char *function;
size_t function_length;
functions_evloop_worker_execute_t cb;
void *cb_data;
time_t default_timeout;
struct rrd_functions_expectation *prev, *next;
};
@ -37,6 +50,10 @@ struct functions_evloop_globals {
netdata_thread_t reader_thread;
netdata_thread_t *worker_threads;
struct {
DICTIONARY *nodes;
} dyncfg;
struct rrd_functions_expectation *expectations;
};
@ -73,7 +90,7 @@ static void *rrd_functions_worker_globals_worker_main(void *arg) {
last_acquired = true;
j = dictionary_acquired_item_value(acquired);
j->cb(j->transaction, j->cmd, &j->stop_monotonic_ut, &j->cancelled);
j->cb(j->transaction, j->cmd, &j->stop_monotonic_ut, &j->cancelled, j->payload, j->source, j->cb_data);
dictionary_del(wg->worker_queue, j->transaction);
dictionary_acquired_item_release(wg->worker_queue, acquired);
dictionary_garbage_collect(wg->worker_queue);
@ -84,73 +101,145 @@ static void *rrd_functions_worker_globals_worker_main(void *arg) {
return NULL;
}
static void worker_add_job(struct functions_evloop_globals *wg, const char *keyword, char *transaction, char *function, char *timeout_s, BUFFER *payload, const char *source) {
if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) {
nd_log(NDLS_COLLECTORS, NDLP_ERR, "Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.",
keyword,
transaction?transaction:"(unset)",
timeout_s?timeout_s:"(unset)",
function?function:"(unset)");
}
else {
int timeout = str2i(timeout_s);
const char *msg = "No function with this name found";
bool found = false;
struct rrd_functions_expectation *we;
for(we = wg->expectations; we ;we = we->next) {
if(strncmp(function, we->function, we->function_length) == 0) {
if(timeout <= 0)
timeout = (int)we->default_timeout;
struct functions_evloop_worker_job t = {
.cmd = strdupz(function),
.transaction = strdupz(transaction),
.running = false,
.cancelled = false,
.timeout = timeout,
.stop_monotonic_ut = now_monotonic_usec() + (timeout * USEC_PER_SEC),
.used = false,
.payload = buffer_dup(payload),
.source = source ? strdupz(source) : NULL,
.cb = we->cb,
.cb_data = we->cb_data,
};
struct functions_evloop_worker_job *j = dictionary_set(wg->worker_queue, transaction, &t, sizeof(t));
if(j->used) {
nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Received duplicate function transaction '%s'. Ignoring it.", transaction);
worker_job_cleanup(&t);
msg = "Duplicate function transaction. Ignoring it.";
}
else {
found = true;
j->used = true;
pthread_cond_signal(&wg->worker_cond_var);
}
}
}
if(!found) {
netdata_mutex_lock(wg->stdout_mutex);
pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_NOT_FOUND, msg);
netdata_mutex_unlock(wg->stdout_mutex);
}
}
}
static void *rrd_functions_worker_globals_reader_main(void *arg) {
struct functions_evloop_globals *wg = arg;
char buffer[PLUGINSD_LINE_MAX + 1];
struct {
size_t last_len; // to remember the last pos - do not use a pointer, the buffer may realloc...
bool enabled;
char *transaction;
char *function;
char *timeout_s;
char *source;
char *content_type;
} deferred = { 0 };
char *s = NULL;
while(!(*wg->plugin_should_exit) && (s = fgets(buffer, PLUGINSD_LINE_MAX, stdin))) {
struct buffered_reader reader = { 0 };
buffered_reader_init(&reader);
BUFFER *buffer = buffer_create(sizeof(reader.read_buffer) + 2, NULL);
while(!(*wg->plugin_should_exit)) {
if(unlikely(!buffered_reader_next_line(&reader, buffer))) {
buffered_reader_ret_t ret = buffered_reader_read_timeout(
&reader,
fileno((FILE *)stdin),
2 * 60 * MSEC_PER_SEC,
false
);
if(unlikely(ret != BUFFERED_READER_READ_OK && ret != BUFFERED_READER_READ_POLL_TIMEOUT))
break;
continue;
}
if(deferred.enabled) {
char *s = (char *)buffer_tostring(buffer);
if(strstr(&s[deferred.last_len], PLUGINSD_KEYWORD_FUNCTION_PAYLOAD_END "\n") != NULL) {
if(deferred.last_len > 0)
// remove the trailing newline from the buffer
deferred.last_len--;
s[deferred.last_len] = '\0';
buffer->len = deferred.last_len;
buffer->content_type = content_type_string2id(deferred.content_type);
worker_add_job(wg, PLUGINSD_KEYWORD_FUNCTION_PAYLOAD, deferred.transaction, deferred.function, deferred.timeout_s, buffer, deferred.source);
buffer_flush(buffer);
freez(deferred.transaction);
freez(deferred.function);
freez(deferred.timeout_s);
freez(deferred.source);
freez(deferred.content_type);
memset(&deferred, 0, sizeof(deferred));
}
else
deferred.last_len = buffer->len;
continue;
}
char *words[MAX_FUNCTION_PARAMETERS] = { NULL };
size_t num_words = quoted_strings_splitter_pluginsd(buffer, words, MAX_FUNCTION_PARAMETERS);
size_t num_words = quoted_strings_splitter_pluginsd((char *)buffer_tostring(buffer), words, MAX_FUNCTION_PARAMETERS);
const char *keyword = get_word(words, num_words, 0);
if(keyword && strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION) == 0) {
if(keyword && (strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION) == 0)) {
char *transaction = get_word(words, num_words, 1);
char *timeout_s = get_word(words, num_words, 2);
char *function = get_word(words, num_words, 3);
char *source = get_word(words, num_words, 4);
worker_add_job(wg, keyword, transaction, function, timeout_s, NULL, source);
}
else if(keyword && (strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION_PAYLOAD) == 0)) {
char *transaction = get_word(words, num_words, 1);
char *timeout_s = get_word(words, num_words, 2);
char *function = get_word(words, num_words, 3);
char *source = get_word(words, num_words, 4);
char *content_type = get_word(words, num_words, 5);
if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) {
nd_log(NDLS_COLLECTORS, NDLP_ERR, "Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.",
keyword,
transaction?transaction:"(unset)",
timeout_s?timeout_s:"(unset)",
function?function:"(unset)");
}
else {
int timeout = str2i(timeout_s);
const char *msg = "No function with this name found";
bool found = false;
struct rrd_functions_expectation *we;
for(we = wg->expectations; we ;we = we->next) {
if(strncmp(function, we->function, we->function_length) == 0) {
if(timeout <= 0)
timeout = (int)we->default_timeout;
struct functions_evloop_worker_job t = {
.cmd = strdupz(function),
.transaction = strdupz(transaction),
.running = false,
.cancelled = false,
.timeout = timeout,
.stop_monotonic_ut = now_monotonic_usec() + (timeout * USEC_PER_SEC),
.used = false,
.cb = we->cb,
};
struct functions_evloop_worker_job *j = dictionary_set(wg->worker_queue, transaction, &t, sizeof(t));
if(j->used) {
nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Received duplicate function transaction '%s'. Ignoring it.", transaction);
freez((void *)t.cmd);
freez((void *)t.transaction);
msg = "Duplicate function transaction. Ignoring it.";
}
else {
found = true;
j->used = true;
pthread_cond_signal(&wg->worker_cond_var);
}
}
}
if(!found) {
netdata_mutex_lock(wg->stdout_mutex);
pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_NOT_FOUND, msg);
netdata_mutex_unlock(wg->stdout_mutex);
}
}
deferred.transaction = strdupz(transaction ? transaction : "");
deferred.timeout_s = strdupz(timeout_s ? timeout_s : "");
deferred.function = strdupz(function ? function : "");
deferred.source = strdupz(source ? source : "");
deferred.content_type = strdupz(content_type ? content_type : "");
deferred.last_len = 0;
deferred.enabled = true;
}
else if(keyword && strcmp(keyword, PLUGINSD_KEYWORD_FUNCTION_CANCEL) == 0) {
char *transaction = get_word(words, num_words, 1);
@ -180,20 +269,20 @@ static void *rrd_functions_worker_globals_reader_main(void *arg) {
}
else
nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "Received unknown command: %s", keyword?keyword:"(unset)");
buffer_flush(buffer);
}
if(!s || feof(stdin) || ferror(stdin)) {
*wg->plugin_should_exit = true;
nd_log(NDLS_COLLECTORS, NDLP_ERR, "Received error on stdin.");
}
if(!(*wg->plugin_should_exit))
nd_log(NDLS_COLLECTORS, NDLP_ERR, "Read error on stdin");
*wg->plugin_should_exit = true;
exit(1);
}
void worker_queue_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
struct functions_evloop_worker_job *j = value;
freez((void *)j->cmd);
freez((void *)j->transaction);
worker_job_cleanup(j);
}
struct functions_evloop_globals *functions_evloop_init(size_t worker_threads, const char *tag, netdata_mutex_t *stdout_mutex, bool *plugin_should_exit) {
@ -202,6 +291,8 @@ struct functions_evloop_globals *functions_evloop_init(size_t worker_threads, co
wg->worker_queue = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
dictionary_register_delete_callback(wg->worker_queue, worker_queue_delete_cb, NULL);
wg->dyncfg.nodes = dyncfg_nodes_dictionary_create();
pthread_mutex_init(&wg->worker_mutex, NULL);
pthread_cond_init(&wg->worker_cond_var, NULL);
@ -222,14 +313,17 @@ struct functions_evloop_globals *functions_evloop_init(size_t worker_threads, co
rrd_functions_worker_globals_worker_main, wg);
}
functions_evloop_add_function(wg, "config", functions_evloop_config_cb, 120, wg);
return wg;
}
void functions_evloop_add_function(struct functions_evloop_globals *wg, const char *function, functions_evloop_worker_execute_t cb, time_t default_timeout) {
void functions_evloop_add_function(struct functions_evloop_globals *wg, const char *function, functions_evloop_worker_execute_t cb, time_t default_timeout, void *data) {
struct rrd_functions_expectation *we = callocz(1, sizeof(*we));
we->function = function;
we->function_length = strlen(we->function);
we->cb = cb;
we->cb_data = data;
we->default_timeout = default_timeout;
DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(wg->expectations, we, prev, next);
}
@ -240,3 +334,84 @@ void functions_evloop_cancel_threads(struct functions_evloop_globals *wg){
netdata_thread_cancel(wg->reader_thread);
}
// ----------------------------------------------------------------------------
static void functions_evloop_config_cb(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled,
BUFFER *payload, const char *source, void *data) {
struct functions_evloop_globals *wg = data;
CLEAN_BUFFER *result = buffer_create(1024, NULL);
int code = dyncfg_node_find_and_call(wg->dyncfg.nodes, transaction, function, stop_monotonic_ut, cancelled, payload, source, result);
netdata_mutex_lock(wg->stdout_mutex);
pluginsd_function_result_begin_to_stdout(transaction, code, content_type_id2string(result->content_type), result->expires);
printf("%s", buffer_tostring(result));
pluginsd_function_result_end_to_stdout();
fflush(stdout);
netdata_mutex_unlock(wg->stdout_mutex);
}
void functions_evloop_dyncfg_add(struct functions_evloop_globals *wg, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, dyncfg_cb_t cb, void *data) {
if(!dyncfg_is_valid_id(id)) {
nd_log(NDLS_COLLECTORS, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id);
return;
}
struct dyncfg_node tmp = {
.cmds = cmds,
.type = type,
.cb = cb,
.data = data,
};
dictionary_set(wg->dyncfg.nodes, id, &tmp, sizeof(tmp));
CLEAN_BUFFER *c = buffer_create(100, NULL);
dyncfg_cmds2buffer(cmds, c);
netdata_mutex_lock(wg->stdout_mutex);
fprintf(stdout,
PLUGINSD_KEYWORD_CONFIG " '%s' " PLUGINSD_KEYWORD_CONFIG_ACTION_CREATE " '%s' '%s' '%s' '%s' '%s' '%s'\n",
id, dyncfg_id2status(status), dyncfg_id2type(type), path,
dyncfg_id2source_type(source_type), source, buffer_tostring(c)
);
fflush(stdout);
netdata_mutex_unlock(wg->stdout_mutex);
}
void functions_evloop_dyncfg_del(struct functions_evloop_globals *wg, const char *id) {
if(!dyncfg_is_valid_id(id)) {
nd_log(NDLS_COLLECTORS, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id);
return;
}
dictionary_del(wg->dyncfg.nodes, id);
netdata_mutex_lock(wg->stdout_mutex);
fprintf(stdout,
PLUGINSD_KEYWORD_CONFIG " %s " PLUGINSD_KEYWORD_CONFIG_ACTION_DELETE "\n",
id);
fflush(stdout);
netdata_mutex_unlock(wg->stdout_mutex);
}
void functions_evloop_dyncfg_status(struct functions_evloop_globals *wg, const char *id, DYNCFG_STATUS status) {
if(!dyncfg_is_valid_id(id)) {
nd_log(NDLS_COLLECTORS, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id);
return;
}
netdata_mutex_lock(wg->stdout_mutex);
fprintf(stdout,
PLUGINSD_KEYWORD_CONFIG " %s " PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS " %s\n",
id, dyncfg_id2status(status));
fflush(stdout);
netdata_mutex_unlock(wg->stdout_mutex);
}
@ -5,6 +5,8 @@
#include "../libnetdata.h"
#define MAX_FUNCTION_PARAMETERS 1024
#define PLUGINSD_KEYWORD_CHART "CHART"
#define PLUGINSD_KEYWORD_CHART_DEFINITION_END "CHART_DEFINITION_END"
@ -28,6 +30,13 @@
#define PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN "FUNCTION_RESULT_BEGIN"
#define PLUGINSD_KEYWORD_FUNCTION_RESULT_END "FUNCTION_RESULT_END"
#define PLUGINSD_KEYWORD_CONFIG "CONFIG"
#define PLUGINSD_KEYWORD_CONFIG_ACTION_CREATE "create"
#define PLUGINSD_KEYWORD_CONFIG_ACTION_DELETE "delete"
#define PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS "status"
#define PLUGINSD_FUNCTION_CONFIG "config"
#define PLUGINSD_KEYWORD_REPLAY_CHART "REPLAY_CHART"
#define PLUGINSD_KEYWORD_REPLAY_BEGIN "RBEGIN"
#define PLUGINSD_KEYWORD_REPLAY_SET "RSET"
@ -44,21 +53,16 @@
#define PLUGINSD_KEYWORD_HOST_LABEL "HOST_LABEL"
#define PLUGINSD_KEYWORD_HOST "HOST"
#define PLUGINSD_KEYWORD_DYNCFG_ENABLE "DYNCFG_ENABLE"
#define PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE "DYNCFG_REGISTER_MODULE"
#define PLUGINSD_KEYWORD_REPORT_JOB_STATUS "REPORT_JOB_STATUS"
#define PLUGINSD_KEYWORD_EXIT "EXIT"
#define PLUGINSD_KEYWORD_SLOT "SLOT" // to change the length of this, update pluginsd_extract_chart_slot() too
#define PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT 10 // seconds
typedef void (*functions_evloop_worker_execute_t)(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled);
typedef void (*functions_evloop_worker_execute_t)(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled, BUFFER *payload, const char *source, void *data);
struct functions_evloop_worker_job;
struct functions_evloop_globals *functions_evloop_init(size_t worker_threads, const char *tag, netdata_mutex_t *stdout_mutex, bool *plugin_should_exit);
void functions_evloop_add_function(struct functions_evloop_globals *wg, const char *function, functions_evloop_worker_execute_t cb, time_t default_timeout);
void functions_evloop_add_function(struct functions_evloop_globals *wg, const char *function, functions_evloop_worker_execute_t cb, time_t default_timeout, void *data);
void functions_evloop_cancel_threads(struct functions_evloop_globals *wg);
#define FUNCTIONS_EXTENDED_TIME_ON_PROGRESS_UT (10 * USEC_PER_SEC)
@ -119,4 +123,8 @@ static inline void pluginsd_function_progress_to_stdout(const char *transaction,
fflush(stdout);
}
void functions_evloop_dyncfg_add(struct functions_evloop_globals *wg, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, dyncfg_cb_t cb, void *data);
void functions_evloop_dyncfg_del(struct functions_evloop_globals *wg, const char *id);
void functions_evloop_dyncfg_status(struct functions_evloop_globals *wg, const char *id, DYNCFG_STATUS status);
#endif //NETDATA_FUNCTIONS_EVLOOP_H
@ -0,0 +1,96 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "content_type.h"
static struct {
const char *format;
HTTP_CONTENT_TYPE content_type;
bool needs_charset;
const char *options;
} content_types[] = {
// primary - preferred during id-to-string conversions
{ .format = "text/html", CT_TEXT_HTML, true },
{ .format = "text/plain", CT_TEXT_PLAIN, true },
{ .format = "text/css", CT_TEXT_CSS, true },
{ .format = "text/yaml", CT_TEXT_YAML, true },
{ .format = "text/xml", CT_TEXT_XML, true },
{ .format = "text/xsl", CT_TEXT_XSL, true },
{ .format = "application/json", CT_APPLICATION_JSON, true },
{ .format = "application/xml", CT_APPLICATION_XML, true },
{ .format = "application/javascript", CT_APPLICATION_X_JAVASCRIPT, true },
{ .format = "application/octet-stream", CT_APPLICATION_OCTET_STREAM, false },
{ .format = "image/svg+xml", CT_IMAGE_SVG_XML, false },
{ .format = "application/x-font-truetype", CT_APPLICATION_X_FONT_TRUETYPE, false },
{ .format = "application/x-font-opentype", CT_APPLICATION_X_FONT_OPENTYPE, false },
{ .format = "application/font-woff", CT_APPLICATION_FONT_WOFF, false },
{ .format = "application/font-woff2", CT_APPLICATION_FONT_WOFF2, false },
{ .format = "application/vnd.ms-fontobject",CT_APPLICATION_VND_MS_FONTOBJ, false },
{ .format = "image/png", CT_IMAGE_PNG, false },
{ .format = "image/jpeg", CT_IMAGE_JPG, false },
{ .format = "image/gif", CT_IMAGE_GIF, false },
{ .format = "image/x-icon", CT_IMAGE_XICON, false },
{ .format = "image/bmp", CT_IMAGE_BMP, false },
{ .format = "image/icns", CT_IMAGE_ICNS, false },
{ .format = "audio/mpeg", CT_AUDIO_MPEG, false },
{ .format = "audio/ogg", CT_AUDIO_OGG, false },
{ .format = "video/mp4", CT_VIDEO_MP4, false },
{ .format = "application/pdf", CT_APPLICATION_PDF, false },
{ .format = "application/zip", CT_APPLICATION_ZIP, false },
// secondary - overlapping with primary
{ .format = "text/plain", CT_PROMETHEUS, false, "version=0.0.4" },
{ .format = "prometheus", CT_PROMETHEUS },
{ .format = "text", CT_TEXT_PLAIN },
{ .format = "txt", CT_TEXT_PLAIN },
{ .format = "json", CT_APPLICATION_JSON },
{ .format = "html", CT_TEXT_HTML },
{ .format = "xml", CT_APPLICATION_XML },
// terminator
{ .format = NULL, CT_TEXT_PLAIN },
};
HTTP_CONTENT_TYPE content_type_string2id(const char *format) {
if(format && *format) {
for (int i = 0; content_types[i].format; i++)
if (strcmp(content_types[i].format, format) == 0)
return content_types[i].content_type;
}
return CT_TEXT_PLAIN;
}
const char *content_type_id2string(HTTP_CONTENT_TYPE content_type) {
for (int i = 0; content_types[i].format; i++)
if (content_types[i].content_type == content_type)
return content_types[i].format;
return "text/plain";
}
void http_header_content_type(BUFFER *wb, HTTP_CONTENT_TYPE content_type) {
buffer_strcat(wb, "Content-Type: ");
for (int i = 0; content_types[i].format; i++) {
if (content_types[i].content_type == content_type) {
buffer_strcat(wb, content_types[i].format);
if(content_types[i].needs_charset) {
buffer_strcat(wb, "; charset=utf-8");
}
if(content_types[i].options) {
buffer_strcat(wb, "; ");
buffer_strcat(wb, content_types[i].options);
}
buffer_strcat(wb, "\r\n");
return;
}
}
buffer_strcat(wb, "text/plain; charset=utf-8\r\n");
}
@ -0,0 +1,45 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_CONTENT_TYPE_H
#define NETDATA_CONTENT_TYPE_H
typedef enum __attribute__ ((__packed__)) {
CT_NONE = 0,
CT_APPLICATION_JSON,
CT_TEXT_PLAIN,
CT_TEXT_HTML,
CT_APPLICATION_X_JAVASCRIPT,
CT_TEXT_CSS,
CT_TEXT_XML,
CT_APPLICATION_XML,
CT_TEXT_XSL,
CT_APPLICATION_OCTET_STREAM,
CT_APPLICATION_X_FONT_TRUETYPE,
CT_APPLICATION_X_FONT_OPENTYPE,
CT_APPLICATION_FONT_WOFF,
CT_APPLICATION_FONT_WOFF2,
CT_APPLICATION_VND_MS_FONTOBJ,
CT_IMAGE_SVG_XML,
CT_IMAGE_PNG,
CT_IMAGE_JPG,
CT_IMAGE_GIF,
CT_IMAGE_XICON,
CT_IMAGE_ICNS,
CT_IMAGE_BMP,
CT_PROMETHEUS,
CT_AUDIO_MPEG,
CT_AUDIO_OGG,
CT_VIDEO_MP4,
CT_APPLICATION_PDF,
CT_APPLICATION_ZIP,
CT_TEXT_YAML,
} HTTP_CONTENT_TYPE;
HTTP_CONTENT_TYPE content_type_string2id(const char *format);
const char *content_type_id2string(HTTP_CONTENT_TYPE content_type);
#include "../libnetdata.h"
void http_header_content_type(struct web_buffer *wb, HTTP_CONTENT_TYPE type);
#endif //NETDATA_CONTENT_TYPE_H
@ -5,34 +5,39 @@
static struct {
HTTP_ACCESS access;
const char *name;
} rrd_function_access_levels[] = {
{ .access = HTTP_ACCESS_NONE, .name = "none" },
{ .access = HTTP_ACCESS_MEMBERS, .name = "members" },
{ .access = HTTP_ACCESS_ADMINS, .name = "admins" },
{ .access = HTTP_ACCESS_ANY, .name = "any" },
} access_levels[] = {
{ .access = HTTP_ACCESS_NONE, .name = "none" },
{ .access = HTTP_ACCESS_MEMBER, .name = "member" },
{ .access = HTTP_ACCESS_ADMIN, .name = "admin" },
{ .access = HTTP_ACCESS_ANY, .name = "any" },
{ .access = HTTP_ACCESS_MEMBER, .name = "members" },
{ .access = HTTP_ACCESS_ADMIN, .name = "admins" },
{ .access = HTTP_ACCESS_ANY, .name = "all" },
// terminator
{ .access = 0, .name = NULL },
};
HTTP_ACCESS http_access2id(const char *access) {
if(!access || !*access)
return HTTP_ACCESS_MEMBERS;
return HTTP_ACCESS_MEMBER;
size_t entries = sizeof(rrd_function_access_levels) / sizeof(rrd_function_access_levels[0]);
for(size_t i = 0; i < entries ;i++) {
if(strcmp(rrd_function_access_levels[i].name, access) == 0)
return rrd_function_access_levels[i].access;
for(size_t i = 0; access_levels[i].name ;i++) {
if(strcmp(access_levels[i].name, access) == 0)
return access_levels[i].access;
}
nd_log(NDLS_DAEMON, NDLP_WARNING, "HTTP access level '%s' is not valid", access);
return HTTP_ACCESS_MEMBERS;
return HTTP_ACCESS_NONE;
}
const char *http_id2access(HTTP_ACCESS access) {
size_t entries = sizeof(rrd_function_access_levels) / sizeof(rrd_function_access_levels[0]);
for(size_t i = 0; i < entries ;i++) {
if(access == rrd_function_access_levels[i].access)
return rrd_function_access_levels[i].name;
for(size_t i = 0; access_levels[i].name ;i++) {
if(access == access_levels[i].access)
return access_levels[i].name;
}
nd_log(NDLS_DAEMON, NDLP_WARNING, "HTTP access level %d is not valid", access);
return "members";
return "none";
}
@ -5,8 +5,8 @@
typedef enum __attribute__((packed)) {
HTTP_ACCESS_NONE = 0,
HTTP_ACCESS_ADMINS = 1,
HTTP_ACCESS_MEMBERS = 2,
HTTP_ACCESS_ADMIN = 1,
HTTP_ACCESS_MEMBER = 2,
HTTP_ACCESS_ANY = 3,
// keep this list so that lower numbers are more strict access levels
@ -210,6 +210,8 @@ static struct {
, { "bmp" , 0 , CT_IMAGE_BMP }
, { "ico" , 0 , CT_IMAGE_XICON }
, { "icns" , 0 , CT_IMAGE_ICNS }
// terminator
, { NULL , 0 , 0 }
};
@ -12,6 +12,7 @@
// HTTP_CODES 2XX Success
#define HTTP_RESP_OK 200
#define HTTP_RESP_ACCEPTED 202
// HTTP_CODES 3XX Redirections
#define HTTP_RESP_MOVED_PERM 301
@ -33,6 +34,7 @@
// HTTP_CODES 5XX Server Errors
#define HTTP_RESP_INTERNAL_SERVER_ERROR 500
#define HTTP_RESP_NOT_IMPLEMENTED 501
#define HTTP_RESP_SERVICE_UNAVAILABLE 503
#define HTTP_RESP_GATEWAY_TIMEOUT 504
#define HTTP_RESP_BACKEND_RESPONSE_INVALID 591
@ -597,4 +597,40 @@ static inline char *trim_all(char *buffer) {
return buffer;
}
static inline bool streq(const char *a, const char *b) {
if (a == b)
return true;
if (a == NULL || b == NULL)
return false;
return strcmp(a, b) == 0;
}
static inline bool strstartswith(const char *string, const char *prefix) {
if (string == NULL || prefix == NULL)
return false;
size_t string_len = strlen(string);
size_t prefix_len = strlen(prefix);
if (prefix_len > string_len)
return false;
return strncmp(string, prefix, prefix_len) == 0;
}
static inline bool strendswith(const char *string, const char *suffix) {
if (string == NULL || suffix == NULL)
return false;
size_t string_len = strlen(string);
size_t suffix_len = strlen(suffix);
if (suffix_len > string_len)
return false;
return strcmp(string + string_len - suffix_len, suffix) == 0;
}
#endif //NETDATA_INLINED_H
@ -1,7 +1,6 @@
#ifndef CHECKIN_JSON_H
#define CHECKIN_JSON_H 1
#if ENABLE_JSONC
#include <json-c/json.h>
// fix an older json-c bug
@ -72,6 +71,10 @@ size_t json_walk_primitive(char *js, jsmntok_t *t, size_t start, JSON_ENTRY *e);
int json_callback_print(JSON_ENTRY *e);
static inline void cleanup_json_object_pp(struct json_object **jobj) {
if(*jobj)
json_object_put(*jobj);
}
#define CLEAN_JSON_OBJECT _cleanup_(cleanup_json_object_pp) struct json_object
#endif
#endif // CHECKIN_JSON_H
@ -129,6 +129,9 @@ static void (*libc_free)(void *) = free_first_run;
static char *strdup_first_run(const char *s);
static char *(*libc_strdup)(const char *) = strdup_first_run;
static char *strndup_first_run(const char *s, size_t len);
static char *(*libc_strndup)(const char *, size_t) = strndup_first_run;
static size_t malloc_usable_size_first_run(void *ptr);
#ifdef HAVE_MALLOC_USABLE_SIZE
static size_t (*libc_malloc_usable_size)(void *) = malloc_usable_size_first_run;
@ -169,6 +172,11 @@ static char *strdup_first_run(const char *s) {
return libc_strdup(s);
}
static char *strndup_first_run(const char *s, size_t len) {
link_system_library_function((libc_function_t *) &libc_strndup, "strndup", true);
return libc_strndup(s, len);
}
static size_t malloc_usable_size_first_run(void *ptr) {
link_system_library_function((libc_function_t *) &libc_malloc_usable_size, "malloc_usable_size", false);
@ -202,6 +210,10 @@ char *strdup(const char *s) {
return strdupz(s);
}
char *strndup(const char *s, size_t len) {
return strndupz(s, len);
}
size_t malloc_usable_size(void *ptr) {
return mallocz_usable_size(ptr);
}
@ -365,6 +377,30 @@ char *strdupz_int(const char *s, const char *file, const char *function, size_t
return (char *)&t->data;
}
char *strndupz_int(const char *s, size_t len, const char *file, const char *function, size_t line) {
struct malloc_trace *p = malloc_trace_find_or_create(file, function, line);
size_t size = len + 1;
size_t_atomic_count(add, p->strdup_calls, 1);
size_t_atomic_count(add, p->allocations, 1);
size_t_atomic_bytes(add, p->bytes, size);
struct malloc_header *t = (struct malloc_header *)libc_malloc(malloc_header_size + size);
if (unlikely(!t)) fatal("strndupz() cannot allocate %zu bytes of memory (%zu with header).", size, malloc_header_size + size);
t->signature.magic = 0x0BADCAFE;
t->signature.trace = p;
t->signature.size = size;
#ifdef NETDATA_INTERNAL_CHECKS
for(ssize_t i = 0; i < (ssize_t)sizeof(t->padding) ;i++) // signed to avoid compiler warning when zero-padded
t->padding[i] = 0xFF;
#endif
memcpy(&t->data, s, size);
t->data[len] = '\0';
return (char *)&t->data;
}
static struct malloc_header *malloc_get_header(void *ptr, const char *caller, const char *file, const char *function, size_t line) {
uint8_t *ret = (uint8_t *)ptr - malloc_header_size;
struct malloc_header *t = (struct malloc_header *)ret;
@ -450,6 +486,12 @@ char *strdupz(const char *s) {
return t;
}
char *strndupz(const char *s, size_t len) {
char *t = strndup(s, len);
if (unlikely(!t)) fatal("Cannot strndup() string '%s' of len %zu", s, len);
return t;
}
// If ptr is NULL, no operation is performed.
void freez(void *ptr) {
free(ptr);

View file

@@ -242,6 +242,11 @@ size_t judy_aral_structures(void);
#define ABS(x) (((x) < 0)? (-(x)) : (x))
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define SWAP(a, b) do { \
typeof(a) _tmp = b; \
b = a; \
a = _tmp; \
} while(0)
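// Usage sketch (illustrative): SWAP() relies on GNU typeof, so it exchanges any
// two lvalues of the same type without a caller-declared temporary:
//     int lo = 9, hi = 3;
//     if(lo > hi) SWAP(lo, hi); // now lo == 3, hi == 9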
#define GUID_LEN 36
@@ -515,6 +520,7 @@ int snprintfz(char *dst, size_t n, const char *fmt, ...) PRINTFLIKE(3, 4);
int malloc_trace_walkthrough(int (*callback)(void *item, void *data), void *data);
#define strdupz(s) strdupz_int(s, __FILE__, __FUNCTION__, __LINE__)
#define strndupz(s, len) strndupz_int(s, len, __FILE__, __FUNCTION__, __LINE__)
#define callocz(nmemb, size) callocz_int(nmemb, size, __FILE__, __FUNCTION__, __LINE__)
#define mallocz(size) mallocz_int(size, __FILE__, __FUNCTION__, __LINE__)
#define reallocz(ptr, size) reallocz_int(ptr, size, __FILE__, __FUNCTION__, __LINE__)
@@ -522,6 +528,7 @@ int malloc_trace_walkthrough(int (*callback)(void *item, void *data), void *data
#define mallocz_usable_size(ptr) mallocz_usable_size_int(ptr, __FILE__, __FUNCTION__, __LINE__)
char *strdupz_int(const char *s, const char *file, const char *function, size_t line);
char *strndupz_int(const char *s, size_t len, const char *file, const char *function, size_t line);
void *callocz_int(size_t nmemb, size_t size, const char *file, const char *function, size_t line);
void *mallocz_int(size_t size, const char *file, const char *function, size_t line);
void *reallocz_int(void *ptr, size_t size, const char *file, const char *function, size_t line);
@@ -530,6 +537,7 @@ size_t mallocz_usable_size_int(void *ptr, const char *file, const char *function
#else // NETDATA_TRACE_ALLOCATIONS
char *strdupz(const char *s) MALLOCLIKE NEVERNULL;
char *strndupz(const char *s, size_t len) MALLOCLIKE NEVERNULL;
void *callocz(size_t nmemb, size_t size) MALLOCLIKE NEVERNULL;
void *mallocz(size_t size) MALLOCLIKE NEVERNULL;
void *reallocz(void *ptr, size_t size) MALLOCLIKE NEVERNULL;
@@ -702,6 +710,8 @@ extern char *netdata_configured_host_prefix;
#include "uuid/uuid.h"
#include "http/http_access.h"
#include "http/content_type.h"
#include "config/dyncfg.h"
#include "libjudy/src/Judy.h"
#include "july/july.h"
#include "os.h"
@@ -747,7 +757,6 @@ extern char *netdata_configured_host_prefix;
#include "http/http_defs.h"
#include "gorilla/gorilla.h"
#include "facets/facets.h"
#include "dyn_conf/dyn_conf.h"
#include "functions_evloop/functions_evloop.h"
#include "query_progress/progress.h"
@@ -901,6 +910,17 @@ bool rrdr_relative_window_to_absolute_query(time_t *after, time_t *before, time_
int netdata_base64_decode(const char *encoded, char *decoded, size_t decoded_size);
static inline void freez_charp(char **p) {
freez(*p);
}
static inline void freez_const_charp(const char **p) {
freez((void *)*p);
}
#define CLEAN_CONST_CHAR_P _cleanup_(freez_const_charp) const char
#define CLEAN_CHAR_P _cleanup_(freez_charp) char
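// Usage sketch (illustrative; parse_request() is a hypothetical caller): the
// string is freez()'d automatically on scope exit, keeping early returns leak-free:
//     CLEAN_CHAR_P *msg = strdupz("temporary");
//     if(!parse_request(msg)) return false; // no explicit freez(msg) needed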
# ifdef __cplusplus
}
# endif

View file

@@ -1116,9 +1116,21 @@ static __thread struct log_field thread_log_fields[_NDF_MAX] = {
.journal = "ND_SRC_TRANSPORT",
.logfmt = "src_transport",
},
[NDF_ACCOUNT_ID] = {
.journal = "ND_ACCOUNT_ID",
.logfmt = "account",
},
[NDF_USER_NAME] = {
.journal = "ND_USER_NAME",
.logfmt = "user",
},
[NDF_USER_ROLE] = {
.journal = "ND_USER_ROLE",
.logfmt = "role",
},
[NDF_SRC_IP] = {
.journal = "ND_SRC_IP",
.logfmt = "src_ip",
},
[NDF_SRC_FORWARDED_HOST] = {
.journal = "ND_SRC_FORWARDED_HOST",
@@ -1360,11 +1372,12 @@ static void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_m
case NDFT_DBL:
buffer_json_member_add_double(wb, key, fields[i].entry.dbl);
break;
case NDFT_UUID:{
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
buffer_json_member_add_string(wb, key, u);
}
case NDFT_UUID:
if(!uuid_is_null(*fields[i].entry.uuid)) {
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
buffer_json_member_add_string(wb, key, u);
}
break;
case NDFT_CALLBACK: {
if(!tmp)
@@ -1689,13 +1702,14 @@ static void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields
buffer_fast_strcat(wb, "=", 1);
buffer_print_netdata_double(wb, fields[i].entry.dbl);
break;
case NDFT_UUID: {
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
buffer_strcat(wb, key);
buffer_fast_strcat(wb, "=", 1);
buffer_fast_strcat(wb, u, sizeof(u) - 1);
}
case NDFT_UUID:
if(!uuid_is_null(*fields[i].entry.uuid)) {
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
buffer_strcat(wb, key);
buffer_fast_strcat(wb, "=", 1);
buffer_fast_strcat(wb, u, sizeof(u) - 1);
}
break;
case NDFT_CALLBACK: {
if(!tmp)
@@ -1786,11 +1800,12 @@ static bool nd_logger_journal_libsystemd(struct log_field *fields, size_t fields
case NDFT_DBL:
rc = asprintf(&value, "%s=%f", key, fields[i].entry.dbl);
break;
case NDFT_UUID: {
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
rc = asprintf(&value, "%s=%s", key, u);
}
case NDFT_UUID:
if(!uuid_is_null(*fields[i].entry.uuid)) {
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
rc = asprintf(&value, "%s=%s", key, u);
}
break;
case NDFT_CALLBACK: {
if(!tmp)
@@ -1884,14 +1899,15 @@ static bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max
buffer_print_netdata_double(wb, fields[i].entry.dbl);
buffer_putc(wb, '\n');
break;
case NDFT_UUID:{
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
buffer_strcat(wb, key);
buffer_putc(wb, '=');
buffer_fast_strcat(wb, u, sizeof(u) - 1);
buffer_putc(wb, '\n');
}
case NDFT_UUID:
if(!uuid_is_null(*fields[i].entry.uuid)) {
char u[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
buffer_strcat(wb, key);
buffer_putc(wb, '=');
buffer_fast_strcat(wb, u, sizeof(u) - 1);
buffer_putc(wb, '\n');
}
break;
case NDFT_CALLBACK: {
if(!tmp)
@@ -2089,7 +2105,7 @@ static void nd_logger_merge_log_stack_to_thread_fields(void) {
if((type == NDFT_TXT && (!e->txt || !*e->txt)) ||
(type == NDFT_BFR && (!e->bfr || !buffer_strlen(e->bfr))) ||
(type == NDFT_STR && !e->str) ||
(type == NDFT_UUID && !e->uuid) ||
(type == NDFT_UUID && (!e->uuid || uuid_is_null(*e->uuid))) ||
(type == NDFT_CALLBACK && !e->cb.formatter) ||
type == NDFT_UNSET)
continue;
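Across the json, logfmt, libsystemd and direct-journal sinks, plus the log-stack merge above, the new uuid_is_null() guards mean an all-zero UUID field is now skipped instead of serialized. Illustratively:
uuid_t t;
uuid_clear(t); // all zeros: the field is omitted, not printed as 00000000-0000-0000-0000-000000000000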

View file

@@ -63,6 +63,11 @@ typedef enum __attribute__((__packed__)) {
// web server, aclk and stream receiver
NDF_SRC_TRANSPORT, // the transport we received the request, one of: http, https, pluginsd
// Netdata Cloud Related
NDF_ACCOUNT_ID,
NDF_USER_NAME,
NDF_USER_ROLE,
// web server and stream receiver
NDF_SRC_IP, // the streaming / web server source IP
NDF_SRC_PORT, // the streaming / web server source Port
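With the three identity fields populated, a logfmt line carries the acting user's cloud context next to the transport and source address, e.g. (values illustrative):
src_transport=https account=11111111-2222-3333-4444-555555555555 user=jane role=admin src_ip=203.0.113.7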

View file

@@ -173,7 +173,7 @@ static void query_progress_cleanup_to_reuse(QUERY_PROGRESS *qp, uuid_t *transact
uuid_copy(qp->transaction, *transaction);
}
static inline void query_progress_update(QUERY_PROGRESS *qp, usec_t started_ut, HTTP_REQUEST_MODE mode, HTTP_ACL acl, const char *query, const char *payload, const char *client) {
static inline void query_progress_update(QUERY_PROGRESS *qp, usec_t started_ut, HTTP_REQUEST_MODE mode, HTTP_ACL acl, const char *query, BUFFER *payload, const char *client) {
qp->mode = mode;
qp->acl = acl;
qp->started_ut = started_ut ? started_ut : now_realtime_usec();
@@ -186,8 +186,8 @@ static inline void query_progress_update(QUERY_PROGRESS *qp, usec_t started_ut,
if(query && *query && !buffer_strlen(qp->query))
buffer_strcat(qp->query, query);
if(payload && *payload && !buffer_strlen(qp->payload))
buffer_strcat(qp->payload, payload);
if(payload && !buffer_strlen(qp->payload))
buffer_copy(qp->payload, payload);
if(client && *client && !buffer_strlen(qp->client))
buffer_strcat(qp->client, client);
@@ -210,7 +210,7 @@ static inline void query_progress_unlink_from_cache_unsafe(QUERY_PROGRESS *qp) {
// ----------------------------------------------------------------------------
// Progress API
void query_progress_start_or_update(uuid_t *transaction, usec_t started_ut, HTTP_REQUEST_MODE mode, HTTP_ACL acl, const char *query, const char *payload, const char *client) {
void query_progress_start_or_update(uuid_t *transaction, usec_t started_ut, HTTP_REQUEST_MODE mode, HTTP_ACL acl, const char *query, BUFFER *payload, const char *client) {
if(!transaction)
return;
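Switching the payload argument from const char * to BUFFER * keeps binary-safe payloads intact end to end (a C string stops at the first NUL), and buffer_copy() above replaces the NUL-terminated buffer_strcat(). A minimal caller sketch - the mode/ACL constants and values are illustrative stand-ins:
uuid_t transaction;
uuid_generate(transaction);
BUFFER *payload = buffer_create(0, NULL);
buffer_strcat(payload, "{\"enabled\":true}");
query_progress_start_or_update(&transaction, now_realtime_usec(), HTTP_REQUEST_MODE_GET, HTTP_ACL_ACLK, "/api/v2/config", payload, "aclk");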

View file

@@ -5,7 +5,7 @@
#include "../libnetdata.h"
void query_progress_start_or_update(uuid_t *transaction, usec_t started_ut, HTTP_REQUEST_MODE mode, HTTP_ACL acl, const char *query, const char *payload, const char *client);
void query_progress_start_or_update(uuid_t *transaction, usec_t started_ut, HTTP_REQUEST_MODE mode, HTTP_ACL acl, const char *query, BUFFER *payload, const char *client);
void query_progress_done_step(uuid_t *transaction, size_t done);
void query_progress_set_finish_line(uuid_t *transaction, size_t all);
void query_progress_finished(uuid_t *transaction, usec_t finished_ut, short int response_code, usec_t duration_ut, size_t response_size, size_t sent_size);

View file

@@ -307,6 +307,25 @@ STRING *string_strdupz(const char *str) {
return string;
}
STRING *string_strndupz(const char *str, size_t len) {
if(unlikely(!str || !*str || !len)) return NULL;
#ifdef NETDATA_INTERNAL_CHECKS
uint8_t partition = string_partition_str(str);
#endif
char buf[len + 1];
memcpy(buf, str, len);
buf[len] = '\0';
STRING *string = string_index_search(buf, len + 1);
while(!string)
string = string_index_insert(buf, len + 1);
string_stats_atomic_increment(partition, active_references);
return string;
}
void string_freez(STRING *string) {
if(unlikely(!string)) return;

View file

@@ -8,7 +8,10 @@
// STRING implementation
typedef struct netdata_string STRING;
STRING *string_strdupz(const char *str);
STRING *string_strndupz(const char *str, size_t len);
STRING *string_dup(STRING *string);
void string_freez(STRING *string);
size_t string_strlen(STRING *string);
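A short usage sketch of the new length-bounded interning call alongside the existing API (path and length illustrative):
const char *path = "health.d/cpu.conf";
STRING *dir = string_strndupz(path, 8);   // interns "health.d"
STRING *again = string_strndupz(path, 8); // same interned STRING, reference count bumped
string_freez(again);
string_freez(dir);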

Some files were not shown because too many files have changed in this diff.