mirror of https://github.com/netdata/netdata.git synced 2025-04-15 01:58:34 +00:00
netdata_netdata/libnetdata/buffered_reader/buffered_reader.h
Costa Tsaousis 3e508c8f95
New logging layer ()
* cleanup of logging - wip

* first working iteration

* add errno annotator

* replace old logging functions with netdata_logger()

* cleanup

* update error_limit

* fix remaining error_limit references

* work on fatal()

* started working on structured logs

* full cleanup

* default logging to files; fix all plugins initialization

* fix formatting of numbers

* cleanup and reorg

* fix coverity issues

* cleanup obsolete code

* fix formatting of numbers

* fix log rotation

* fix for older systems

* add detection of systemd journal via stderr

* finished on access.log

* remove left-over transport

* do not add empty fields to the logs

* journal gets compact UUIDs; the X-Transaction-ID header is added to web responses

* allow compiling on systems without memfd sealing

* added libnetdata/uuid directory

* move datetime formatters to libnetdata

* add missing files

* link the makefiles in libnetdata

* added uuid_parse_flexi() to parse UUIDs with and without hyphens; the web server now reads X-Transaction-ID and uses it for functions and web responses

* added stream receiver, sender, proc plugin and pluginsd log stack

* iso8601 advanced usage; line_splitter module in libnetdata; code cleanup

* add message ids to streaming inbound and outbound connections

* cleanup line_splitter between lines to avoid logging garbage; when killing children, kill them with SIGABRT if internal checks are enabled

* send SIGABRT to external plugins only if we are not shutting down

* fix cross cleanup in pluginsd parser

* fatal when there is a stack error in logs

* compile netdata with -fexceptions

* do not kill external plugins with SIGABRT

* metasync info logs to debug level

* added severity to logs

* added json output; added options per log output; added documentation; fixed issues mentioned

* allow memfd only on linux

* moved journal low level functions to journal.c/h

* move health logs to daemon.log with proper priorities

* fixed a couple of bugs; health log in journal

* updated docs

* systemd-cat-native command to push structured logs to journal from the command line

* fix makefiles

* restored NETDATA_LOG_SEVERITY_LEVEL

* fix makefiles

* systemd-cat-native can also work as the logger of Netdata scripts

* do not require a socket to systemd-journal to log-as-netdata

* alarm notify logs in native format

* properly compare log ids

* fatals log alerts; alarm-notify.sh working

* fix overflow warning

* alarm-notify.sh now logs the request (command line)

* annotate external plugin logs with the function cmd they run

* added context, component and type to alarm-notify.sh; shell sanitization removes control characters and characters that may be expanded by bash

* reformatted alarm-notify logs

* unify cgroup-network-helper.sh

* added quotes around params

* charts.d.plugin switched logging to journal native

* quotes for logfmt

* unify the status codes of streaming receivers and senders

* alarm-notify: don't log anything if there is nothing to do

* all external plugins log to stderr when running outside netdata; alarm-notify now shows an error when notification methods are needed but not available

* migrate cgroup-name.sh to new logging

* systemd-cat-native now supports messages with newlines

* socket.c logs use priority

* cleanup log field types

* inherit the systemd set INVOCATION_ID if found

* allow systemd-cat-native to send messages to a systemd-journal-remote URL

* log2journal command that can convert structured logs to journal export format (see the sample entries after this commit log)

* various fixes and documentation of log2journal

* updated log2journal docs

* updated log2journal docs

* updated documentation of fields

* allow compiling without libcurl

* do not use socket as format string

* added version information to newly added tools

* updated documentation and help messages

* fix the namespace socket path

* print errno with error

* do not timeout

* updated docs

* updated docs

* updated docs

* log2journal updated docs and params

* when talking to a remote journal, systemd-cat-native batches the messages

* enable lz4 compression for systemd-cat-native when sending messages to a systemd-journal-remote

* Revert "enable lz4 compression for systemd-cat-native when sending messages to a systemd-journal-remote"

This reverts commit b079d53c11.

* note about uncompressed traffic

* log2journal: code reorg and cleanup to make modular

* finished rewriting log2journal

* more comments

* rewriting rules support

* increased limits

* updated docs

* updated docs

* fix old log call

* use journal only when stderr is connected to journal

* update netdata.spec for libcurl, libpcre2 and log2journal

* pcre2-devel

* do not require pcre2 in centos < 8, amazonlinux < 2023, open suse

* log2journal only on systems where pcre2 is available

* ignore log2journal in .gitignore

* avoid log2journal on centos 7, amazonlinux 2 and opensuse

* add pcre2-8 to static build

* undo last commit

* Bundle to static

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Add build deps for deb packages

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Add dependencies; build from source

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Test build for amazon linux and centos; expect to fail for suse

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* fix minor oversight

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Reorg code

* Add the install from source (deps) as a TODO
* Do not enable the build on the suse ecosystem

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

---------

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>
Co-authored-by: Tasos Katsoulas <tasos@netdata.cloud>
2023-11-22 10:27:25 +02:00
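
The commit log above refers to the systemd Journal Export Format in several places (it is what log2journal emits and what systemd-cat-native can forward to a systemd-journal-remote URL). As a rough illustration of the format only, not of Netdata's actual field set: each entry is a sequence of FIELD=VALUE lines, PRIORITY carries the syslog severity, and an empty line separates entries.

MESSAGE=example line converted by log2journal
PRIORITY=6
SYSLOG_IDENTIFIER=netdata

MESSAGE=a second entry; the blank line above separates entries
PRIORITY=3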


// SPDX-License-Identifier: GPL-3.0-or-later

#include "../libnetdata.h"

#ifndef NETDATA_BUFFERED_READER_H
#define NETDATA_BUFFERED_READER_H

struct buffered_reader {
    ssize_t read_len;
    ssize_t pos;
    char read_buffer[PLUGINSD_LINE_MAX + 1];
};

static inline void buffered_reader_init(struct buffered_reader *reader) {
    reader->read_buffer[0] = '\0';
    reader->read_len = 0;
    reader->pos = 0;
}

typedef enum {
    BUFFERED_READER_READ_OK = 0,
    BUFFERED_READER_READ_FAILED = -1,
    BUFFERED_READER_READ_BUFFER_FULL = -2,
    BUFFERED_READER_READ_POLLERR = -3,
    BUFFERED_READER_READ_POLLHUP = -4,
    BUFFERED_READER_READ_POLLNVAL = -5,
    BUFFERED_READER_READ_POLL_UNKNOWN = -6,
    BUFFERED_READER_READ_POLL_TIMEOUT = -7,
    BUFFERED_READER_READ_POLL_FAILED = -8,
} buffered_reader_ret_t;
static inline buffered_reader_ret_t buffered_reader_read(struct buffered_reader *reader, int fd) {
#ifdef NETDATA_INTERNAL_CHECKS
    if(reader->read_buffer[reader->read_len] != '\0')
        fatal("read_buffer does not start with zero");
#endif

    char *read_at = reader->read_buffer + reader->read_len;
    ssize_t remaining = sizeof(reader->read_buffer) - reader->read_len - 1;

    if(unlikely(remaining <= 0))
        return BUFFERED_READER_READ_BUFFER_FULL;

    ssize_t bytes_read = read(fd, read_at, remaining);
    if(unlikely(bytes_read <= 0))
        return BUFFERED_READER_READ_FAILED;

    reader->read_len += bytes_read;
    reader->read_buffer[reader->read_len] = '\0';

    return BUFFERED_READER_READ_OK;
}
static inline buffered_reader_ret_t buffered_reader_read_timeout(struct buffered_reader *reader, int fd, int timeout_ms, bool log_error) {
    errno = 0;
    struct pollfd fds[1];

    fds[0].fd = fd;
    fds[0].events = POLLIN;

    int ret = poll(fds, 1, timeout_ms);

    if (ret > 0) {
        /* There is data to read */
        if (fds[0].revents & POLLIN)
            return buffered_reader_read(reader, fd);

        else if(fds[0].revents & POLLERR) {
            if(log_error)
                netdata_log_error("PARSER: read failed: POLLERR.");
            return BUFFERED_READER_READ_POLLERR;
        }
        else if(fds[0].revents & POLLHUP) {
            if(log_error)
                netdata_log_error("PARSER: read failed: POLLHUP.");
            return BUFFERED_READER_READ_POLLHUP;
        }
        else if(fds[0].revents & POLLNVAL) {
            if(log_error)
                netdata_log_error("PARSER: read failed: POLLNVAL.");
            return BUFFERED_READER_READ_POLLNVAL;
        }

        if(log_error)
            netdata_log_error("PARSER: poll() returned positive number, but POLLIN|POLLERR|POLLHUP|POLLNVAL are not set.");
        return BUFFERED_READER_READ_POLL_UNKNOWN;
    }
    else if (ret == 0) {
        if(log_error)
            netdata_log_error("PARSER: timeout while waiting for data.");
        return BUFFERED_READER_READ_POLL_TIMEOUT;
    }

    if(log_error)
        netdata_log_error("PARSER: poll() failed with code %d.", ret);
    return BUFFERED_READER_READ_POLL_FAILED;
}
/* Produce a full line if one exists; statefully remember where to start next time.
 * If we hit the end of the read buffer with a partial line, the bytes already copied
 * stay in dst and the read buffer is reset, so the next fill appends the rest.
 */
static inline bool buffered_reader_next_line(struct buffered_reader *reader, BUFFER *dst) {
    buffer_need_bytes(dst, reader->read_len - reader->pos + 2);

    size_t start = reader->pos;

    char *ss = &reader->read_buffer[start];
    char *se = &reader->read_buffer[reader->read_len];
    char *ds = &dst->buffer[dst->len];
    char *de = &ds[dst->size - dst->len - 2];

    if(ss >= se) {
        *ds = '\0';
        reader->pos = 0;
        reader->read_len = 0;
        reader->read_buffer[reader->read_len] = '\0';
        return false;
    }

    // copy all bytes to buffer
    while(ss < se && ds < de && *ss != '\n') {
        *ds++ = *ss++;
        dst->len++;
    }

    // if we have a newline, return the buffer
    if(ss < se && ds < de && *ss == '\n') {
        // newline found in the read_buffer
        *ds++ = *ss++; // copy the newline too
        dst->len++;
        *ds = '\0';

        reader->pos = ss - reader->read_buffer;
        return true;
    }

    reader->pos = 0;
    reader->read_len = 0;
    reader->read_buffer[reader->read_len] = '\0';
    return false;
}
#endif //NETDATA_BUFFERED_READER_H
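
A minimal sketch of how this API is typically driven. The caller loop below is illustrative only: the read_lines_from_fd() wrapper, the 500 ms timeout, and the use of buffer_flush() to reset the line BUFFER between lines are my assumptions, not something this header prescribes.

// Usage sketch (assumptions: 'line' is a libnetdata BUFFER created by the
// caller; 'fd' is a readable pipe, e.g. from an external plugin).
static void read_lines_from_fd(int fd, BUFFER *line) {
    struct buffered_reader reader;
    buffered_reader_init(&reader);

    for(;;) {
        // drain every complete line already buffered; buffered_reader_next_line()
        // appends into 'line' and returns true only when a newline was copied
        while(buffered_reader_next_line(&reader, line)) {
            // 'line' now holds one full line (including the trailing newline);
            // process it, then reset the BUFFER for the next line (assumption:
            // buffer_flush() is the usual libnetdata way to do this)
            buffer_flush(line);
        }

        // nothing complete is buffered; wait up to 500 ms (assumed) for more input
        buffered_reader_ret_t rc = buffered_reader_read_timeout(&reader, fd, 500, true);
        if(rc != BUFFERED_READER_READ_OK && rc != BUFFERED_READER_READ_POLL_TIMEOUT)
            break; // EOF, read error, hangup, or poll failure
    }
}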