0
0
Fork 0
mirror of https://github.com/netdata/netdata.git synced 2025-04-17 03:02:41 +00:00
netdata_netdata/collectors/plugins.d/pluginsd_parser.h
Costa Tsaousis 3e508c8f95
New logging layer ()
* cleanup of logging - wip

* first working iteration

* add errno annotator

* replace old logging functions with netdata_logger()

* cleanup

* update error_limit

* fix remaining error_limit references

* work on fatal()

* started working on structured logs

* full cleanup

* default logging to files; fix all plugins initialization

* fix formatting of numbers

* cleanup and reorg

* fix coverity issues

* cleanup obsolete code

* fix formatting of numbers

* fix log rotation

* fix for older systems

* add detection of systemd journal via stderr

* finished on access.log

* remove left-over transport

* do not add empty fields to the logs

* journal get compact uuids; X-Transaction-ID header is added in web responses

* allow compiling on systems without memfd sealing

* added libnetdata/uuid directory

* move datetime formatters to libnetdata

* add missing files

* link the makefiles in libnetdata

* added uuid_parse_flexi() to parse UUIDs with and without hyphens; the web server now read X-Transaction-ID and uses it for functions and web responses

* added stream receiver, sender, proc plugin and pluginsd log stack

* iso8601 advanced usage; line_splitter module in libnetdata; code cleanup

* add message ids to streaming inbound and outbound connections

* cleanup line_splitter between lines to avoid logging garbage; when killing children, kill them with SIGABRT if internal checks is enabled

* send SIGABRT to external plugins only if we are not shutting down

* fix cross cleanup in pluginsd parser

* fatal when there is a stack error in logs

* compile netdata with -fexceptions

* do not kill external plugins with SIGABRT

* metasync info logs to debug level

* added severity to logs

* added json output; added options per log output; added documentation; fixed issues mentioned

* allow memfd only on linux

* moved journal low level functions to journal.c/h

* move health logs to daemon.log with proper priorities

* fixed a couple of bugs; health log in journal

* updated docs

* systemd-cat-native command to push structured logs to journal from the command line

* fix makefiles

* restored NETDATA_LOG_SEVERITY_LEVEL

* fix makefiles

* systemd-cat-native can also work as the logger of Netdata scripts

* do not require a socket to systemd-journal to log-as-netdata

* alarm notify logs in native format

* properly compare log ids

* fatals log alerts; alarm-notify.sh working

* fix overflow warning

* alarm-notify.sh now logs the request (command line)

* annotate external plugins logs with the function cmd they run

* added context, component and type to alarm-notify.sh; shell sanitization removes control character and characters that may be expanded by bash

* reformatted alarm-notify logs

* unify cgroup-network-helper.sh

* added quotes around params

* charts.d.plugin switched logging to journal native

* quotes for logfmt

* unify the status codes of streaming receivers and senders

* alarm-notify: dont log anything, if there is nothing to do

* all external plugins log to stderr when running outside netdata; alarm-notify now shows an error when notification methods are needed but are not available

* migrate cgroup-name.sh to new logging

* systemd-cat-native now supports messages with newlines

* socket.c logs use priority

* cleanup log field types

* inherit the systemd set INVOCATION_ID if found

* allow systemd-cat-native to send messages to a systemd-journal-remote URL

* log2journal command that can convert structured logs to journal export format

* various fixes and documentation of log2journal

* updated log2journal docs

* updated log2journal docs

* updated documentation of fields

* allow compiling without libcurl

* do not use socket as format string

* added version information to newly added tools

* updated documentation and help messages

* fix the namespace socket path

* print errno with error

* do not timeout

* updated docs

* updated docs

* updated docs

* log2journal updated docs and params

* when talking to a remote journal, systemd-cat-native batches the messages

* enable lz4 compression for systemd-cat-native when sending messages to a systemd-journal-remote

* Revert "enable lz4 compression for systemd-cat-native when sending messages to a systemd-journal-remote"

This reverts commit b079d53c11.

* note about uncompressed traffic

* log2journal: code reorg and cleanup to make modular

* finished rewriting log2journal

* more comments

* rewriting rules support

* increased limits

* updated docs

* updated docs

* fix old log call

* use journal only when stderr is connected to journal

* update netdata.spec for libcurl, libpcre2 and log2journal

* pcre2-devel

* do not require pcre2 in centos < 8, amazonlinux < 2023, open suse

* log2journal only on systems pcre2 is available

* ignore log2journal in .gitignore

* avoid log2journal on centos 7, amazonlinux 2 and opensuse

* add pcre2-8 to static build

* undo last commit

* Bundle to static

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Add build deps for deb packages

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Add dependencies; build from source

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Test build for amazon linux and centos expect to fail for suse

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* fix minor oversight

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Reorg code

* Add the install from source (deps) as a TODO
* Not enable the build on suse ecosystem

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

---------

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>
Co-authored-by: Tasos Katsoulas <tasos@netdata.cloud>
2023-11-22 10:27:25 +02:00

244 lines
7.7 KiB
C

// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_PLUGINSD_PARSER_H
#define NETDATA_PLUGINSD_PARSER_H
#include "daemon/common.h"
// worker job ids below this value are reserved for the receiver thread
#define WORKER_PARSER_FIRST_JOB 3

// this has to be in-sync with the same at receiver.c
#define WORKER_RECEIVER_JOB_REPLICATION_COMPLETION (WORKER_PARSER_FIRST_JOB - 3)

// this controls the max response size of a function
// (parser_action() stops the plugin when a deferred response exceeds it)
#define PLUGINSD_MAX_DEFERRED_SIZE (20 * 1024 * 1024)

// minimum number of rrdset pointer slots to cache per parser -- TODO confirm usage at the .c side
#define PLUGINSD_MIN_RRDSET_POINTERS_CACHE 1024
// PARSER return codes - the result of a keyword callback, steering the parse loop
typedef enum __attribute__ ((__packed__)) parser_rc {
    PARSER_RC_OK,       // Callback was successful, go on
    PARSER_RC_STOP,     // Callback says STOP
    PARSER_RC_ERROR     // Callback failed (abort rest of callbacks)
} PARSER_RC;
// Bit flags controlling how parser input is consumed (stored in PARSER.flags).
typedef enum __attribute__ ((__packed__)) parser_input_type {
    PARSER_INPUT_SPLIT = (1 << 1),          // split input lines into words -- presumably; set by parser_init() callers, confirm
    PARSER_DEFER_UNTIL_KEYWORD = (1 << 2),  // buffer whole lines into defer.response until defer.end_keyword arrives (see parser_action())
} PARSER_INPUT_TYPE;
// Keyword sets a parser understands; used as a bitmask -
// parser_find_keyword() accepts a keyword only when its repertoire
// overlaps the parser's repertoire.
typedef enum __attribute__ ((__packed__)) {
    PARSER_INIT_PLUGINSD = (1 << 1),    // keywords of the external plugins (plugins.d) protocol
    PARSER_INIT_STREAMING = (1 << 2),   // keywords of streaming receivers
    PARSER_REP_METADATA = (1 << 3),     // metadata-related keywords -- presumably; confirm against parser_init_repertoire()
} PARSER_REPERTOIRE;
struct parser;

// Signature of a keyword handler: receives the already-split words of the line.
typedef PARSER_RC (*keyword_function)(char **words, size_t num_words, struct parser *parser);

// One entry of the keyword table (looked up via gperf_lookup_keyword()).
typedef struct parser_keyword {
    char *keyword;                  // the protocol keyword, as it appears on the wire
    size_t id;                      // numeric id of the keyword -- TODO confirm how it is used at the .c side
    PARSER_REPERTOIRE repertoire;   // which parser repertoires this keyword belongs to
    size_t worker_job_id;           // worker job to account execution time to (see parser_action())
} PARSER_KEYWORD;
// State shared between keyword callbacks of a single parser instance.
// Embedded in PARSER (field 'user'); maintained by the callbacks between lines.
typedef struct parser_user_object {
    bool cleanup_slots;                 // when set, cached slot pointers need re-validation -- presumably; confirm
    RRDSET *st;                         // the chart currently being collected -- presumably
    RRDHOST *host;                      // the host the collected data belongs to -- presumably
    void *opaque;                       // private data of the parser owner
    struct plugind *cd;                 // plugin descriptor, for external plugins -- TODO confirm
    int trust_durations;                // NOTE(review): looks like a boolean "trust plugin-reported durations" - confirm
    RRDLABELS *new_host_labels;
    RRDLABELS *chart_rrdlabels_linked_temporarily;
    size_t data_collections_count;
    int enabled;

#ifdef NETDATA_LOG_STREAM_RECEIVE
    FILE *stream_log_fp;                     // where parser_action() mirrors received lines
    PARSER_REPERTOIRE stream_log_repertoire; // only lines whose keyword repertoire overlaps this are mirrored
#endif

    STREAM_CAPABILITIES capabilities; // receiver capabilities

    // state of a host-definition section currently being parsed -- presumably HOST_DEFINE; confirm
    struct {
        bool parsing_host;
        uuid_t machine_guid;
        char machine_guid_str[UUID_STR_LEN];
        STRING *hostname;
        RRDLABELS *rrdlabels;
    } host_define;

    // replication (replay) window state
    struct parser_user_object_replay {
        time_t start_time;
        time_t end_time;
        usec_t start_time_ut;
        usec_t end_time_ut;
        time_t wall_clock_time;
        bool rset_enabled;
    } replay;

    // v2 streaming protocol state -- presumably BEGIN2/SET2/END2; confirm
    struct parser_user_object_v2 {
        bool locked_data_collection;
        RRDSET_STREAM_BUFFER stream_buffer; // sender capabilities in this
        time_t update_every;
        time_t end_time;
        time_t wall_clock_time;
        bool ml_locked;
    } v2;
} PARSER_USER_OBJECT;
// The parser object - one per input stream (external plugin pipe or
// streaming connection).
typedef struct parser {
    uint8_t version;                // Parser version
    PARSER_REPERTOIRE repertoire;   // the keyword sets this parser accepts (see parser_find_keyword())
    uint32_t flags;                 // PARSER_INPUT_TYPE bits
    int fd;                         // Socket
    FILE *fp_input;                 // Input source e.g. stream
    FILE *fp_output;                // Stream to send commands to plugin

#ifdef ENABLE_HTTPS
    NETDATA_SSL *ssl_output;
#endif
#ifdef ENABLE_H2O
    void *h2o_ctx;                  // if set we use h2o_stream functions to send data
#endif

    PARSER_USER_OBJECT user;        // User defined structure to hold extra state between calls

    struct buffered_reader reader;  // buffered reading of the input
    struct line_splitter line;      // the words of the line currently being parsed
    PARSER_KEYWORD *keyword;        // the keyword of the line currently being parsed

    // deferral state: while PARSER_DEFER_UNTIL_KEYWORD is set in flags,
    // parser_action() appends every input line to 'response' until
    // 'end_keyword' is seen, then calls 'action' and clears all of this.
    struct {
        const char *end_keyword;
        BUFFER *response;
        void (*action)(struct parser *parser, void *action_data);
        void *action_data;
    } defer;

    struct {
        DICTIONARY *functions;      // in-flight function requests -- presumably; see inflight_functions_init()
        usec_t smaller_timeout;
    } inflight;

    struct {
        SPINLOCK spinlock;          // serializes writers to the output channel -- TODO confirm
    } writer;
} PARSER;
PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, PARSER_INPUT_TYPE flags, void *ssl);
void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire);
void parser_destroy(PARSER *working_parser);
void pluginsd_cleanup_v2(PARSER *parser);
void inflight_functions_init(PARSER *parser);
void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire);
PARSER_RC parser_execute(PARSER *parser, PARSER_KEYWORD *keyword, char **words, size_t num_words);
// Extract the first whitespace-delimited token of src into dst (always
// NUL-terminated). isspace_map is a 256-entry lookup table where a true
// entry marks a separator character.
// Returns the number of characters copied (0 when src holds no keyword,
// or when dst has no room at all). Tokens longer than dst_size - 1 are
// silently truncated.
static inline int find_first_keyword(const char *src, char *dst, int dst_size, bool *isspace_map) {
    // fix: the original wrote the terminating NUL even when dst_size was 0
    // (or negative), overflowing the destination buffer - bail out first
    if (dst_size < 1)
        return 0;

    const char *s = src;

    // skip leading separators
    while (isspace_map[(uint8_t)*s])
        s++;

    const char *keyword_start = s;

    // copy the token, keeping one byte for the NUL terminator
    while (*s && !isspace_map[(uint8_t)*s] && dst_size > 1) {
        *dst++ = *s++;
        dst_size--;
    }
    *dst = '\0';

    return (int)(s - keyword_start);
}
PARSER_KEYWORD *gperf_lookup_keyword(register const char *str, register size_t len);
// Look up 'command' in the keyword table and return it only when it
// belongs to this parser's repertoire; NULL otherwise.
static inline PARSER_KEYWORD *parser_find_keyword(PARSER *parser, const char *command) {
    PARSER_KEYWORD *found = gperf_lookup_keyword(command, strlen(command));

    if(!found || !(found->repertoire & parser->repertoire))
        return NULL;

    return found;
}
bool parser_reconstruct_node(BUFFER *wb, void *ptr);
bool parser_reconstruct_instance(BUFFER *wb, void *ptr);
bool parser_reconstruct_context(BUFFER *wb, void *ptr);
// Parse and execute one line of plugin/stream input.
// Returns 0 to continue parsing, 1 to stop (the keyword returned STOP or
// ERROR, or a deferred response exceeded PLUGINSD_MAX_DEFERRED_SIZE).
// NOTE: 'input' is modified in place by the word splitter.
static inline int parser_action(PARSER *parser, char *input) {
#ifdef NETDATA_LOG_STREAM_RECEIVE
    // keep a copy of the raw line - the splitter below mutates 'input'
    static __thread char line[PLUGINSD_LINE_MAX + 1];
    strncpyz(line, input, sizeof(line) - 1);
#endif

    parser->line.count++;

    // deferred mode: swallow whole lines into defer.response
    // until the registered end keyword arrives
    if(unlikely(parser->flags & PARSER_DEFER_UNTIL_KEYWORD)) {
        char command[100 + 1];
        bool has_keyword = find_first_keyword(input, command, 100, isspace_map_pluginsd);

        if(!has_keyword || strcmp(command, parser->defer.end_keyword) != 0) {
            if(parser->defer.response) {
                buffer_strcat(parser->defer.response, input);
                if(buffer_strlen(parser->defer.response) > PLUGINSD_MAX_DEFERRED_SIZE) {
                    // more than PLUGINSD_MAX_DEFERRED_SIZE of data,
                    // or a bad plugin that did not send the end_keyword
                    internal_error(true, "PLUGINSD: deferred response is too big (%zu bytes). Stopping this plugin.", buffer_strlen(parser->defer.response));
                    return 1;
                }
            }
            return 0;
        }
        else {
            // end keyword seen - call the action
            parser->defer.action(parser, parser->defer.action_data);

            // empty everything
            parser->defer.action = NULL;
            parser->defer.action_data = NULL;
            parser->defer.end_keyword = NULL;
            parser->defer.response = NULL;
            parser->flags &= ~PARSER_DEFER_UNTIL_KEYWORD;
        }
        return 0;
    }

    // normal mode: split the line into words and dispatch on the first one
    parser->line.num_words = quoted_strings_splitter_pluginsd(input, parser->line.words, PLUGINSD_MAX_WORDS);
    const char *command = get_word(parser->line.words, parser->line.num_words, 0);

    if(unlikely(!command)) {
        // empty line - nothing to do
        line_splitter_reset(&parser->line);
        return 0;
    }

    PARSER_RC rc;
    parser->keyword = parser_find_keyword(parser, command);
    if(likely(parser->keyword)) {
        // account execution time of this keyword to its worker job
        worker_is_busy(parser->keyword->worker_job_id);

#ifdef NETDATA_LOG_STREAM_RECEIVE
        // mirror the raw line to the stream log, when its repertoire matches
        if(parser->user.stream_log_fp && parser->keyword->repertoire & parser->user.stream_log_repertoire)
            fprintf(parser->user.stream_log_fp, "%s", line);
#endif

        rc = parser_execute(parser, parser->keyword, parser->line.words, parser->line.num_words);
        // rc = (*t->func)(words, num_words, parser);
        worker_is_idle();
    }
    else
        rc = PARSER_RC_ERROR;   // unknown keyword, or not in this parser's repertoire

    if(rc == PARSER_RC_ERROR) {
        // log the offending line, reconstructed with quotes to show how it was split
        CLEAN_BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
        line_splitter_reconstruct_line(wb, &parser->line);
        netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)",
                          command, parser->line.count, buffer_tostring(wb));
    }

    line_splitter_reset(&parser->line);
    return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP);
}
#endif //NETDATA_PLUGINSD_PARSER_H