0
0
Fork 0
mirror of https://github.com/netdata/netdata.git synced 2025-05-19 23:41:46 +00:00
netdata_netdata/web/server/web_client_cache.c
Costa Tsaousis 3e508c8f95
New logging layer ()
* cleanup of logging - wip

* first working iteration

* add errno annotator

* replace old logging functions with netdata_logger()

* cleanup

* update error_limit

* fix remaining error_limit references

* work on fatal()

* started working on structured logs

* full cleanup

* default logging to files; fix all plugins initialization

* fix formatting of numbers

* cleanup and reorg

* fix coverity issues

* cleanup obsolete code

* fix formatting of numbers

* fix log rotation

* fix for older systems

* add detection of systemd journal via stderr

* finished on access.log

* remove left-over transport

* do not add empty fields to the logs

* journal get compact uuids; X-Transaction-ID header is added in web responses

* allow compiling on systems without memfd sealing

* added libnetdata/uuid directory

* move datetime formatters to libnetdata

* add missing files

* link the makefiles in libnetdata

* added uuid_parse_flexi() to parse UUIDs with and without hyphens; the web server now read X-Transaction-ID and uses it for functions and web responses

* added stream receiver, sender, proc plugin and pluginsd log stack

* iso8601 advanced usage; line_splitter module in libnetdata; code cleanup

* add message ids to streaming inbound and outbound connections

* cleanup line_splitter between lines to avoid logging garbage; when killing children, kill them with SIGABRT if internal checks is enabled

* send SIGABRT to external plugins only if we are not shutting down

* fix cross cleanup in pluginsd parser

* fatal when there is a stack error in logs

* compile netdata with -fexceptions

* do not kill external plugins with SIGABRT

* metasync info logs to debug level

* added severity to logs

* added json output; added options per log output; added documentation; fixed issues mentioned

* allow memfd only on linux

* moved journal low level functions to journal.c/h

* move health logs to daemon.log with proper priorities

* fixed a couple of bugs; health log in journal

* updated docs

* systemd-cat-native command to push structured logs to journal from the command line

* fix makefiles

* restored NETDATA_LOG_SEVERITY_LEVEL

* fix makefiles

* systemd-cat-native can also work as the logger of Netdata scripts

* do not require a socket to systemd-journal to log-as-netdata

* alarm notify logs in native format

* properly compare log ids

* fatals log alerts; alarm-notify.sh working

* fix overflow warning

* alarm-notify.sh now logs the request (command line)

* annotate external plugins logs with the function cmd they run

* added context, component and type to alarm-notify.sh; shell sanitization removes control characters and characters that may be expanded by bash

* reformatted alarm-notify logs

* unify cgroup-network-helper.sh

* added quotes around params

* charts.d.plugin switched logging to journal native

* quotes for logfmt

* unify the status codes of streaming receivers and senders

* alarm-notify: don't log anything, if there is nothing to do

* all external plugins log to stderr when running outside netdata; alarm-notify now shows an error when notification methods are needed but are not available

* migrate cgroup-name.sh to new logging

* systemd-cat-native now supports messages with newlines

* socket.c logs use priority

* cleanup log field types

* inherit the systemd set INVOCATION_ID if found

* allow systemd-cat-native to send messages to a systemd-journal-remote URL

* log2journal command that can convert structured logs to journal export format

* various fixes and documentation of log2journal

* updated log2journal docs

* updated log2journal docs

* updated documentation of fields

* allow compiling without libcurl

* do not use socket as format string

* added version information to newly added tools

* updated documentation and help messages

* fix the namespace socket path

* print errno with error

* do not timeout

* updated docs

* updated docs

* updated docs

* log2journal updated docs and params

* when talking to a remote journal, systemd-cat-native batches the messages

* enable lz4 compression for systemd-cat-native when sending messages to a systemd-journal-remote

* Revert "enable lz4 compression for systemd-cat-native when sending messages to a systemd-journal-remote"

This reverts commit b079d53c11.

* note about uncompressed traffic

* log2journal: code reorg and cleanup to make modular

* finished rewriting log2journal

* more comments

* rewriting rules support

* increased limits

* updated docs

* updated docs

* fix old log call

* use journal only when stderr is connected to journal

* update netdata.spec for libcurl, libpcre2 and log2journal

* pcre2-devel

* do not require pcre2 in centos < 8, amazonlinux < 2023, open suse

* log2journal only on systems pcre2 is available

* ignore log2journal in .gitignore

* avoid log2journal on centos 7, amazonlinux 2 and opensuse

* add pcre2-8 to static build

* undo last commit

* Bundle to static

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Add build deps for deb packages

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Add dependencies; build from source

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Test build for amazon linux and centos expect to fail for suse

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* fix minor oversight

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

* Reorg code

* Add the install from source (deps) as a TODO
* Not enable the build on suse ecosystem

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>

---------

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>
Co-authored-by: Tasos Katsoulas <tasos@netdata.cloud>
2023-11-22 10:27:25 +02:00

148 lines
5.2 KiB
C

// SPDX-License-Identifier: GPL-3.0-or-later
#define WEB_SERVER_INTERNALS 1
#include "web_client_cache.h"
// ----------------------------------------------------------------------------
// allocate and free web_clients
// ----------------------------------------------------------------------------
// web clients caching
// When clients connect and disconnect, avoid allocating and releasing memory.
// Instead, when new clients get connected, reuse any memory previously allocated
// for serving web clients that are now disconnected.
// The size of the cache is adaptive. It caches the structures of 2x
// the number of currently connected clients.
// Global, file-scope cache of web_client structures, split in two lists:
//  - "used":  structures currently serving connected clients
//  - "avail": structures of disconnected clients, parked for reuse
// Each list is protected by its own spinlock; the two locks are never
// required to be held at the same time (see the functions below).
static struct clients_cache {
struct {
SPINLOCK spinlock;                    // protects head/count/allocated/reused below
struct web_client *head; // the structures of the currently connected clients
size_t count; // the count of the currently connected clients
size_t allocated; // the number of fresh allocations (cache misses)
size_t reused; // the number of re-uses (cache hits)
} used;
struct {
SPINLOCK spinlock;                    // protects head/count below
struct web_client *head; // the cached structures, available for future clients
size_t count; // the number of cached structures
} avail;
} web_clients_cache = {
.used = {
.spinlock = NETDATA_SPINLOCK_INITIALIZER,
.head = NULL,
.count = 0,
.reused = 0,
.allocated = 0,
},
.avail = {
.spinlock = NETDATA_SPINLOCK_INITIALIZER,
.head = NULL,
.count = 0,
},
};
// destroy the cache and free all the memory it uses
// Tear down the cache at shutdown: log usage statistics and free every
// structure parked in the "avail" list. Structures still linked in the
// "used" list belong to live connections and are deliberately NOT freed
// here - releasing them would pull memory out from under active clients.
void web_client_cache_destroy(void) {
    // statistics are read without the locks; this runs at shutdown,
    // so the counters are no longer changing
    internal_error(true, "web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
    , web_clients_cache.used.count
    , web_clients_cache.avail.count
    , web_clients_cache.used.allocated
    , web_clients_cache.used.reused
    , (web_clients_cache.used.allocated + web_clients_cache.used.reused)?(web_clients_cache.used.reused * 100 / (web_clients_cache.used.allocated + web_clients_cache.used.reused)):0
    );

    spinlock_lock(&web_clients_cache.avail.spinlock);

    // walk the avail list, freeing each entry; grab the next pointer
    // before freeing, since web_client_free() invalidates the node
    struct web_client *next = web_clients_cache.avail.head;
    while(next) {
        struct web_client *victim = next;
        next = next->cache.next;
        web_client_free(victim);
    }
    web_clients_cache.avail.head = NULL;
    web_clients_cache.avail.count = 0;

    spinlock_unlock(&web_clients_cache.avail.spinlock);
}
// Hand out a web_client structure for a new connection: recycle one from
// the "avail" list when possible, otherwise allocate a fresh one. Either
// way, the structure ends up linked at the head of the "used" list and
// initialized for a new request.
// Locking: the avail lock is always released before the used lock is
// taken, so the two locks are never held together.
struct web_client *web_client_get_from_cache(void) {
    spinlock_lock(&web_clients_cache.avail.spinlock);
    struct web_client *w = web_clients_cache.avail.head;

    if(!w) {
        // cache miss - allocate a brand new structure
        spinlock_unlock(&web_clients_cache.avail.spinlock);
        w = web_client_create(&netdata_buffers_statistics.buffers_web);

        spinlock_lock(&web_clients_cache.used.spinlock);
        w->id = global_statistics_web_client_connected();
        web_clients_cache.used.allocated++;
    }
    else {
        // cache hit - detach it from avail and recycle it
        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(web_clients_cache.avail.head, w, cache.prev, cache.next);
        web_clients_cache.avail.count--;
        spinlock_unlock(&web_clients_cache.avail.spinlock);

        web_client_reuse_from_cache(w);

        spinlock_lock(&web_clients_cache.used.spinlock);
        web_clients_cache.used.reused++;
    }

    // either way, it now joins the used list
    DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(web_clients_cache.used.head, w, cache.prev, cache.next);
    web_clients_cache.used.count++;
    spinlock_unlock(&web_clients_cache.used.spinlock);

    // reset per-request state
    w->use_count++;
    w->mode = WEB_CLIENT_MODE_GET;
    memset(w->transaction, 0, sizeof(w->transaction));

    return w;
}
void web_client_release_to_cache(struct web_client *w) {
#ifdef ENABLE_HTTPS
netdata_ssl_close(&w->ssl);
#endif
// unlink it from the used
spinlock_lock(&web_clients_cache.used.spinlock);
DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(web_clients_cache.used.head, w, cache.prev, cache.next);
ssize_t used_count = (ssize_t)--web_clients_cache.used.count;
spinlock_unlock(&web_clients_cache.used.spinlock);
spinlock_lock(&web_clients_cache.avail.spinlock);
if(w->use_count > 100 || (used_count > 0 && web_clients_cache.avail.count >= 2 * (size_t)used_count) || (used_count <= 10 && web_clients_cache.avail.count >= 20)) {
spinlock_unlock(&web_clients_cache.avail.spinlock);
// we have too many of them - free it
web_client_free(w);
}
else {
// link it to the avail
DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(web_clients_cache.avail.head, w, cache.prev, cache.next);
web_clients_cache.avail.count++;
spinlock_unlock(&web_clients_cache.avail.spinlock);
}
}