mirror of
https://github.com/netdata/netdata.git
synced 2025-04-10 16:17:36 +00:00
eBPF memory (#18232)
This commit is contained in:
parent
9f148ab67e
commit
3590d9fc27
35 changed files with 1081 additions and 761 deletions
packaging/cmake/Modules
src/collectors/ebpf.plugin
ebpf.c
ebpf.d
ebpf.hebpf_apps.cebpf_apps.hebpf_cachestat.cebpf_cachestat.hebpf_cgroup.hebpf_dcstat.cebpf_dcstat.hebpf_fd.cebpf_fd.hebpf_filesystem.cebpf_functions.cebpf_oomkill.cebpf_process.cebpf_process.hebpf_shm.cebpf_shm.hebpf_socket.cebpf_socket.hebpf_swap.cebpf_swap.hebpf_vfs.cebpf_vfs.hmetadata.yaml
|
@ -11,8 +11,8 @@ set(ebpf-co-re_SOURCE_DIR "${CMAKE_BINARY_DIR}/ebpf-co-re")
|
|||
function(netdata_fetch_ebpf_co_re)
|
||||
ExternalProject_Add(
|
||||
ebpf-co-re
|
||||
URL https://github.com/netdata/ebpf-co-re/releases/download/v1.4.5/netdata-ebpf-co-re-glibc-v1.4.5.tar.xz
|
||||
URL_HASH SHA256=6937a167f6f8c65a0b0528a297df9944d15a649c9af34a70a678d4eabbbf22d1
|
||||
URL https://github.com/netdata/ebpf-co-re/releases/download/v1.4.5.1/netdata-ebpf-co-re-glibc-v1.4.5.1.tar.xz
|
||||
URL_HASH SHA256=10d49602c873932a4e0a3717a4af2137434b480d0170c2fb000ec70ae02f6e30
|
||||
SOURCE_DIR "${ebpf-co-re_SOURCE_DIR}"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
|
|
|
@ -20,19 +20,19 @@ function(netdata_fetch_legacy_ebpf_code)
|
|||
endif()
|
||||
|
||||
if(need_static)
|
||||
set(_hash 3f97034a595b5fd52ac4c5f43ce43085cc1391f39f2a281191efb15cc9666af4)
|
||||
set(_hash 1c0c8f1177514e9e21a23c28841406595e57b7cfacd93746ff2d6b25987b94a6)
|
||||
set(_libc "static")
|
||||
elseif(_libc STREQUAL "glibc")
|
||||
set(_hash 66094175e4d79b8a7222bc20d9e0d1bfbd37414891f88fc0113da53a97f8896a)
|
||||
set(_hash e365a76a2bb25190f1d91e4dea2cfc5ff5db63b5238fbfbc89f72755cf85a12c)
|
||||
elseif(_libc STREQUAL "musl")
|
||||
set(_hash 58daad4a82cf3c511372892dd21b2825fcb138aad22c1db6bc889b1965439f5e)
|
||||
set(_hash ec14dcdfa29d4fba1cea6763740b9d37683515bde88a1a29b6e7c70ce01a604d)
|
||||
else()
|
||||
message(FATAL_ERROR "Could not determine libc implementation, unable to install eBPF legacy code.")
|
||||
endif()
|
||||
|
||||
ExternalProject_Add(
|
||||
ebpf-code-legacy
|
||||
URL https://github.com/netdata/kernel-collector/releases/download/v1.4.5/netdata-kernel-collector-${_libc}-v1.4.5.tar.xz
|
||||
URL https://github.com/netdata/kernel-collector/releases/download/v1.4.5.1/netdata-kernel-collector-${_libc}-v1.4.5.1.tar.xz
|
||||
URL_HASH SHA256=${_hash}
|
||||
SOURCE_DIR "${ebpf-legacy_SOURCE_DIR}"
|
||||
CONFIGURE_COMMAND ""
|
||||
|
|
|
@ -30,6 +30,7 @@ int ebpf_nprocs;
|
|||
int isrh = 0;
|
||||
int main_thread_id = 0;
|
||||
int process_pid_fd = -1;
|
||||
uint64_t collect_pids = 0;
|
||||
static size_t global_iterations_counter = 1;
|
||||
bool publish_internal_metrics = true;
|
||||
|
||||
|
@ -996,7 +997,7 @@ static inline void ebpf_create_apps_for_module(ebpf_module_t *em, struct ebpf_ta
|
|||
*/
|
||||
static void ebpf_create_apps_charts(struct ebpf_target *root)
|
||||
{
|
||||
if (unlikely(!ebpf_all_pids))
|
||||
if (unlikely(!ebpf_pids))
|
||||
return;
|
||||
|
||||
struct ebpf_target *w;
|
||||
|
@ -1028,21 +1029,15 @@ static void ebpf_create_apps_charts(struct ebpf_target *root)
|
|||
}
|
||||
}
|
||||
|
||||
int i;
|
||||
if (!newly_added) {
|
||||
if (newly_added) {
|
||||
int i;
|
||||
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
|
||||
ebpf_module_t *current = &ebpf_modules[i];
|
||||
if (current->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
|
||||
if (!(collect_pids & (1<<i)))
|
||||
continue;
|
||||
|
||||
ebpf_module_t *current = &ebpf_modules[i];
|
||||
ebpf_create_apps_for_module(current, root);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
|
||||
ebpf_module_t *current = &ebpf_modules[i];
|
||||
ebpf_create_apps_for_module(current, root);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2680,7 +2675,7 @@ static void ebpf_allocate_common_vectors()
|
|||
{
|
||||
ebpf_judy_pid.pid_table = ebpf_allocate_pid_aral(NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME,
|
||||
sizeof(netdata_ebpf_judy_pid_stats_t));
|
||||
ebpf_all_pids = callocz((size_t)pid_max, sizeof(struct ebpf_pid_stat *));
|
||||
ebpf_pids = callocz((size_t)pid_max, sizeof(ebpf_pid_data_t));
|
||||
ebpf_aral_init();
|
||||
}
|
||||
|
||||
|
@ -3014,7 +3009,7 @@ static int ebpf_load_collector_config(char *path, int *disable_cgroups, int upda
|
|||
/**
|
||||
* Set global variables reading environment variables
|
||||
*/
|
||||
void set_global_variables()
|
||||
static void ebpf_set_global_variables()
|
||||
{
|
||||
// Get environment variables
|
||||
ebpf_plugin_dir = getenv("NETDATA_PLUGINS_DIR");
|
||||
|
@ -3418,6 +3413,11 @@ void ebpf_send_statistic_data()
|
|||
}
|
||||
ebpf_write_end_chart();
|
||||
|
||||
ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, "monitoring_pid", "");
|
||||
write_chart_dimension("user", ebpf_all_pids_count);
|
||||
write_chart_dimension("kernel", ebpf_hash_table_pids_count);
|
||||
ebpf_write_end_chart();
|
||||
|
||||
ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LIFE_TIME, "");
|
||||
for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
|
||||
ebpf_module_t *wem = &ebpf_modules[i];
|
||||
|
@ -3489,6 +3489,37 @@ static void update_internal_metric_variable()
|
|||
publish_internal_metrics = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create PIDS Chart
|
||||
*
|
||||
* Write to standard output current values for PIDSs charts.
|
||||
*
|
||||
* @param order order to display chart
|
||||
* @param update_every time used to update charts
|
||||
*/
|
||||
static void ebpf_create_pids_chart(int order, int update_every)
|
||||
{
|
||||
ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
|
||||
"monitoring_pid",
|
||||
"",
|
||||
"Total number of monitored PIDs",
|
||||
"pids",
|
||||
NETDATA_EBPF_FAMILY,
|
||||
NETDATA_EBPF_CHART_TYPE_LINE,
|
||||
"netdata.ebpf_pids",
|
||||
order,
|
||||
update_every,
|
||||
"main");
|
||||
|
||||
ebpf_write_global_dimension("user",
|
||||
"user",
|
||||
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
|
||||
|
||||
ebpf_write_global_dimension("kernel",
|
||||
"kernel",
|
||||
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Thread Chart
|
||||
*
|
||||
|
@ -3538,7 +3569,7 @@ static void ebpf_create_thread_chart(char *name,
|
|||
(char *)em->info.thread_name,
|
||||
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create chart for Load Thread
|
||||
|
@ -3741,6 +3772,8 @@ static void ebpf_create_statistic_charts(int update_every)
|
|||
update_every,
|
||||
NULL);
|
||||
|
||||
ebpf_create_pids_chart(NETDATA_EBPF_ORDER_PIDS, update_every);
|
||||
|
||||
ebpf_create_thread_chart(NETDATA_EBPF_LIFE_TIME,
|
||||
"Time remaining for thread.",
|
||||
"seconds",
|
||||
|
@ -3974,18 +4007,18 @@ int main(int argc, char **argv)
|
|||
clocks_init();
|
||||
nd_log_initialize_for_external_plugins(NETDATA_EBPF_PLUGIN_NAME);
|
||||
|
||||
main_thread_id = gettid_cached();
|
||||
|
||||
set_global_variables();
|
||||
ebpf_parse_args(argc, argv);
|
||||
ebpf_manage_pid(getpid());
|
||||
|
||||
ebpf_set_global_variables();
|
||||
if (ebpf_can_plugin_load_code(running_on_kernel, NETDATA_EBPF_PLUGIN_NAME))
|
||||
return 2;
|
||||
|
||||
if (ebpf_adjust_memory_limit())
|
||||
return 3;
|
||||
|
||||
main_thread_id = gettid_cached();
|
||||
|
||||
ebpf_parse_args(argc, argv);
|
||||
ebpf_manage_pid(getpid());
|
||||
|
||||
signal(SIGINT, ebpf_stop_threads);
|
||||
signal(SIGQUIT, ebpf_stop_threads);
|
||||
signal(SIGTERM, ebpf_stop_threads);
|
||||
|
@ -4018,7 +4051,7 @@ int main(int argc, char **argv)
|
|||
ebpf_cgroup_integration,
|
||||
NULL);
|
||||
|
||||
int i;
|
||||
uint32_t i;
|
||||
for (i = 0; ebpf_threads[i].name != NULL; i++) {
|
||||
struct netdata_static_thread *st = &ebpf_threads[i];
|
||||
|
||||
|
@ -4028,6 +4061,10 @@ int main(int argc, char **argv)
|
|||
if (em->enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) {
|
||||
em->enabled = NETDATA_THREAD_EBPF_RUNNING;
|
||||
em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
|
||||
|
||||
if (em->functions.apps_routine && (em->apps_charts || em->cgroup_charts)) {
|
||||
collect_pids |= 1<<i;
|
||||
}
|
||||
st->thread = nd_thread_create(st->name, NETDATA_THREAD_OPTION_JOINABLE, st->start_routine, em);
|
||||
} else {
|
||||
em->lifetime = EBPF_DEFAULT_LIFETIME;
|
||||
|
@ -4038,7 +4075,7 @@ int main(int argc, char **argv)
|
|||
heartbeat_t hb;
|
||||
heartbeat_init(&hb);
|
||||
int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
|
||||
int max_period = update_apps_every * EBPF_CLEANUP_FACTOR;
|
||||
uint32_t max_period = EBPF_CLEANUP_FACTOR;
|
||||
int update_apps_list = update_apps_every - 1;
|
||||
int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core;
|
||||
//Plugin will be killed when it receives a signal
|
||||
|
@ -4057,12 +4094,16 @@ int main(int argc, char **argv)
|
|||
if (++update_apps_list == update_apps_every) {
|
||||
update_apps_list = 0;
|
||||
pthread_mutex_lock(&lock);
|
||||
pthread_mutex_lock(&collect_data_mutex);
|
||||
ebpf_cleanup_exited_pids(max_period);
|
||||
collect_data_for_all_processes(process_pid_fd, process_maps_per_core);
|
||||
if (collect_pids) {
|
||||
pthread_mutex_lock(&collect_data_mutex);
|
||||
ebpf_parse_proc_files();
|
||||
if (collect_pids & (1<<EBPF_MODULE_PROCESS_IDX)) {
|
||||
collect_data_for_all_processes(process_pid_fd, process_maps_per_core, max_period);
|
||||
}
|
||||
|
||||
ebpf_create_apps_charts(apps_groups_root_target);
|
||||
pthread_mutex_unlock(&collect_data_mutex);
|
||||
ebpf_create_apps_charts(apps_groups_root_target);
|
||||
pthread_mutex_unlock(&collect_data_mutex);
|
||||
}
|
||||
pthread_mutex_unlock(&lock);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,6 +37,6 @@
|
|||
# pid table size = 32768
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = all
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
|
|
@ -35,6 +35,6 @@
|
|||
# pid table size = 32768
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = all
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
|
|
@ -23,5 +23,6 @@
|
|||
# pid table size = 32768
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
|
|
@ -3,9 +3,21 @@
|
|||
# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
|
||||
# new charts for the return of these functions, such as errors.
|
||||
#
|
||||
# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
|
||||
# or `cgroups.plugin`.
|
||||
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
|
||||
# the setting `apps` and `cgroups` to 'no'.
|
||||
#
|
||||
# The `pid table size` defines the maximum number of PIDs stored inside the hash table.
|
||||
#
|
||||
# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
|
||||
#
|
||||
# The `lifetime` defines the time length a thread will run when it is enabled by a function.
|
||||
#
|
||||
[global]
|
||||
# ebpf load mode = entry
|
||||
# update every = 1
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = real parent
|
||||
lifetime = 300
|
||||
|
|
|
@ -26,6 +26,6 @@
|
|||
# cgroups = no
|
||||
# update every = 10
|
||||
# pid table size = 32768
|
||||
collect pid = all
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
# pid table size = 32768
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = all
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
||||
|
|
|
@ -30,6 +30,6 @@
|
|||
# pid table size = 32768
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = all
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
|
|
@ -31,5 +31,6 @@
|
|||
# pid table size = 32768
|
||||
ebpf type format = auto
|
||||
ebpf co-re tracing = trampoline
|
||||
collect pid = real parent
|
||||
# maps per core = yes
|
||||
lifetime = 300
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
|
||||
#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
|
||||
|
||||
extern size_t ebpf_hash_table_pids_count;
|
||||
#ifdef LIBBPF_MAJOR_VERSION // BTF code
|
||||
#include "cachestat.skel.h"
|
||||
#include "dc.skel.h"
|
||||
|
@ -122,34 +123,6 @@ typedef struct netdata_ebpf_judy_pid_stats {
|
|||
} netdata_ebpf_judy_pid_stats_t;
|
||||
|
||||
extern ebpf_module_t ebpf_modules[];
|
||||
enum ebpf_main_index {
|
||||
EBPF_MODULE_PROCESS_IDX,
|
||||
EBPF_MODULE_SOCKET_IDX,
|
||||
EBPF_MODULE_CACHESTAT_IDX,
|
||||
EBPF_MODULE_SYNC_IDX,
|
||||
EBPF_MODULE_DCSTAT_IDX,
|
||||
EBPF_MODULE_SWAP_IDX,
|
||||
EBPF_MODULE_VFS_IDX,
|
||||
EBPF_MODULE_FILESYSTEM_IDX,
|
||||
EBPF_MODULE_DISK_IDX,
|
||||
EBPF_MODULE_MOUNT_IDX,
|
||||
EBPF_MODULE_FD_IDX,
|
||||
EBPF_MODULE_HARDIRQ_IDX,
|
||||
EBPF_MODULE_SOFTIRQ_IDX,
|
||||
EBPF_MODULE_OOMKILL_IDX,
|
||||
EBPF_MODULE_SHM_IDX,
|
||||
EBPF_MODULE_MDFLUSH_IDX,
|
||||
EBPF_MODULE_FUNCTION_IDX,
|
||||
/* THREADS MUST BE INCLUDED BEFORE THIS COMMENT */
|
||||
EBPF_OPTION_ALL_CHARTS,
|
||||
EBPF_OPTION_VERSION,
|
||||
EBPF_OPTION_HELP,
|
||||
EBPF_OPTION_GLOBAL_CHART,
|
||||
EBPF_OPTION_RETURN_MODE,
|
||||
EBPF_OPTION_LEGACY,
|
||||
EBPF_OPTION_CORE,
|
||||
EBPF_OPTION_UNITTEST
|
||||
};
|
||||
|
||||
typedef struct ebpf_tracepoint {
|
||||
bool enabled;
|
||||
|
@ -380,6 +353,7 @@ void ebpf_read_local_addresses_unsafe();
|
|||
extern ebpf_filesystem_partitions_t localfs[];
|
||||
extern ebpf_sync_syscalls_t local_syscalls[];
|
||||
extern bool ebpf_plugin_exit;
|
||||
extern uint64_t collect_pids;
|
||||
|
||||
static inline bool ebpf_plugin_stop(void) {
|
||||
return ebpf_plugin_exit || nd_thread_signaled_to_cancel();
|
||||
|
|
|
@ -21,37 +21,11 @@ void ebpf_aral_init(void)
|
|||
max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
|
||||
}
|
||||
|
||||
ebpf_aral_apps_pid_stat = ebpf_allocate_pid_aral("ebpf_pid_stat", sizeof(struct ebpf_pid_stat));
|
||||
|
||||
#ifdef NETDATA_DEV_MODE
|
||||
netdata_log_info("Plugin is using ARAL with values %d", NETDATA_EBPF_ALLOC_MAX_PID);
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* eBPF pid stat get
|
||||
*
|
||||
* Get a ebpf_pid_stat entry to be used with a specific PID.
|
||||
*
|
||||
* @return it returns the address on success.
|
||||
*/
|
||||
struct ebpf_pid_stat *ebpf_pid_stat_get(void)
|
||||
{
|
||||
struct ebpf_pid_stat *target = aral_mallocz(ebpf_aral_apps_pid_stat);
|
||||
memset(target, 0, sizeof(struct ebpf_pid_stat));
|
||||
return target;
|
||||
}
|
||||
|
||||
/**
|
||||
* eBPF target release
|
||||
*
|
||||
* @param stat Release a target after usage.
|
||||
*/
|
||||
void ebpf_pid_stat_release(struct ebpf_pid_stat *stat)
|
||||
{
|
||||
aral_freez(ebpf_aral_apps_pid_stat, stat);
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// internal flags
|
||||
// handled in code (automatically set)
|
||||
|
@ -332,11 +306,11 @@ int ebpf_read_apps_groups_conf(struct ebpf_target **agdt, struct ebpf_target **a
|
|||
|
||||
#define MAX_CMDLINE 16384
|
||||
|
||||
struct ebpf_pid_stat **ebpf_all_pids = NULL; // to avoid allocations, we pre-allocate the
|
||||
// the entire pid space.
|
||||
struct ebpf_pid_stat *ebpf_root_of_pids = NULL; // global list of all processes running
|
||||
ebpf_pid_data_t *ebpf_pids = NULL; // to avoid allocations, we pre-allocate the entire pid space.
|
||||
ebpf_pid_data_t *ebpf_pids_link_list = NULL; // global list of all processes running
|
||||
|
||||
size_t ebpf_all_pids_count = 0; // the number of processes running
|
||||
size_t ebpf_all_pids_count = 0; // the number of processes running read from /proc
|
||||
size_t ebpf_hash_table_pids_count = 0; // the number of tasks in our hash tables
|
||||
|
||||
struct ebpf_target
|
||||
*apps_groups_default_target = NULL, // the default target
|
||||
|
@ -388,110 +362,12 @@ static inline void debug_log_dummy(void)
|
|||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Managed log
|
||||
*
|
||||
* Store log information if it is necessary.
|
||||
*
|
||||
* @param p the pid stat structure
|
||||
* @param log the log id
|
||||
* @param status the return from a function.
|
||||
*
|
||||
* @return It returns the status value.
|
||||
*/
|
||||
static inline int managed_log(struct ebpf_pid_stat *p, uint32_t log, int status)
|
||||
{
|
||||
if (unlikely(!status)) {
|
||||
// netdata_log_error("command failed log %u, errno %d", log, errno);
|
||||
|
||||
if (unlikely(debug_enabled || errno != ENOENT)) {
|
||||
if (unlikely(debug_enabled || !(p->log_thrown & log))) {
|
||||
p->log_thrown |= log;
|
||||
switch (log) {
|
||||
case PID_LOG_IO:
|
||||
netdata_log_error(
|
||||
"Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid,
|
||||
p->comm);
|
||||
break;
|
||||
|
||||
case PID_LOG_STATUS:
|
||||
netdata_log_error(
|
||||
"Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid,
|
||||
p->comm);
|
||||
break;
|
||||
|
||||
case PID_LOG_CMDLINE:
|
||||
netdata_log_error(
|
||||
"Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid,
|
||||
p->comm);
|
||||
break;
|
||||
|
||||
case PID_LOG_FDS:
|
||||
netdata_log_error(
|
||||
"Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix,
|
||||
p->pid, p->comm);
|
||||
break;
|
||||
|
||||
case PID_LOG_STAT:
|
||||
break;
|
||||
|
||||
default:
|
||||
netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
errno_clear();
|
||||
} else if (unlikely(p->log_thrown & log)) {
|
||||
// netdata_log_error("unsetting log %u on pid %d", log, p->pid);
|
||||
p->log_thrown &= ~log;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get PID entry
|
||||
*
|
||||
* Get or allocate the PID entry for the specified pid.
|
||||
*
|
||||
* @param pid the pid to search the data.
|
||||
* @param tgid the task group id
|
||||
*
|
||||
* @return It returns the pid entry structure
|
||||
*/
|
||||
ebpf_pid_stat_t *ebpf_get_pid_entry(pid_t pid, pid_t tgid)
|
||||
{
|
||||
ebpf_pid_stat_t *ptr = ebpf_all_pids[pid];
|
||||
if (unlikely(ptr)) {
|
||||
if (!ptr->ppid && tgid)
|
||||
ptr->ppid = tgid;
|
||||
return ebpf_all_pids[pid];
|
||||
}
|
||||
|
||||
struct ebpf_pid_stat *p = ebpf_pid_stat_get();
|
||||
|
||||
if (likely(ebpf_root_of_pids))
|
||||
ebpf_root_of_pids->prev = p;
|
||||
|
||||
p->next = ebpf_root_of_pids;
|
||||
ebpf_root_of_pids = p;
|
||||
|
||||
p->pid = pid;
|
||||
p->ppid = tgid;
|
||||
|
||||
ebpf_all_pids[pid] = p;
|
||||
ebpf_all_pids_count++;
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* Assign the PID to a target.
|
||||
*
|
||||
* @param p the pid_stat structure to assign for a target.
|
||||
*/
|
||||
static inline void assign_target_to_pid(struct ebpf_pid_stat *p)
|
||||
static inline void assign_target_to_pid(ebpf_pid_data_t *p)
|
||||
{
|
||||
targets_assignment_counter++;
|
||||
|
||||
|
@ -499,6 +375,7 @@ static inline void assign_target_to_pid(struct ebpf_pid_stat *p)
|
|||
size_t pclen = strlen(p->comm);
|
||||
|
||||
struct ebpf_target *w;
|
||||
bool assigned = false;
|
||||
for (w = apps_groups_root_target; w; w = w->next) {
|
||||
// if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
|
||||
|
||||
|
@ -521,9 +398,17 @@ static inline void assign_target_to_pid(struct ebpf_pid_stat *p)
|
|||
if (debug_enabled || (p->target && p->target->debug_enabled))
|
||||
debug_log_int("%s linked to target %s", p->comm, p->target->name);
|
||||
|
||||
w->processes++;
|
||||
assigned = true;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!assigned) {
|
||||
apps_groups_default_target->processes++;
|
||||
p->target = apps_groups_default_target;
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
@ -532,22 +417,18 @@ static inline void assign_target_to_pid(struct ebpf_pid_stat *p)
|
|||
/**
|
||||
* Read cmd line from /proc/PID/cmdline
|
||||
*
|
||||
* @param p the ebpf_pid_stat_structure.
|
||||
* @param p the ebpf_pid_data structure.
|
||||
*
|
||||
* @return It returns 1 on success and 0 otherwise.
|
||||
*/
|
||||
static inline int read_proc_pid_cmdline(struct ebpf_pid_stat *p)
|
||||
static inline int read_proc_pid_cmdline(ebpf_pid_data_t *p, char *cmdline)
|
||||
{
|
||||
static char cmdline[MAX_CMDLINE + 1];
|
||||
char filename[FILENAME_MAX + 1];
|
||||
snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
|
||||
|
||||
int ret = 0;
|
||||
if (unlikely(!p->cmdline_filename)) {
|
||||
char filename[FILENAME_MAX + 1];
|
||||
snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
|
||||
p->cmdline_filename = strdupz(filename);
|
||||
}
|
||||
|
||||
int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
|
||||
int fd = open(filename, procfile_open_flags, 0666);
|
||||
if (unlikely(fd == -1))
|
||||
goto cleanup;
|
||||
|
||||
|
@ -563,21 +444,12 @@ static inline int read_proc_pid_cmdline(struct ebpf_pid_stat *p)
|
|||
cmdline[i] = ' ';
|
||||
}
|
||||
|
||||
debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
|
||||
debug_log("Read file '%s' contents: %s", filename, p->cmdline);
|
||||
|
||||
ret = 1;
|
||||
|
||||
cleanup:
|
||||
// copy the command to the command line
|
||||
if (p->cmdline)
|
||||
freez(p->cmdline);
|
||||
p->cmdline = strdupz(p->comm);
|
||||
|
||||
rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
|
||||
netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, p->pid);
|
||||
if (pid_ptr)
|
||||
pid_ptr->cmdline = p->cmdline;
|
||||
rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
|
||||
p->cmdline[0] = '\0';
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -587,44 +459,43 @@ cleanup:
|
|||
* Assign target to pid
|
||||
*
|
||||
* @param p the pid stat structure to store the data.
|
||||
* @param ptr an useless argument.
|
||||
*/
|
||||
static inline int read_proc_pid_stat(struct ebpf_pid_stat *p, void *ptr)
|
||||
static inline int read_proc_pid_stat(ebpf_pid_data_t *p)
|
||||
{
|
||||
UNUSED(ptr);
|
||||
procfile *ff;
|
||||
|
||||
static procfile *ff = NULL;
|
||||
|
||||
if (unlikely(!p->stat_filename)) {
|
||||
char filename[FILENAME_MAX + 1];
|
||||
snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
|
||||
p->stat_filename = strdupz(filename);
|
||||
}
|
||||
|
||||
int set_quotes = (!ff) ? 1 : 0;
|
||||
char filename[FILENAME_MAX + 1];
|
||||
int ret = 0;
|
||||
snprintfz(filename, FILENAME_MAX, "%s/proc/%u/stat", netdata_configured_host_prefix, p->pid);
|
||||
|
||||
struct stat statbuf;
|
||||
if (stat(p->stat_filename, &statbuf))
|
||||
if (stat(filename, &statbuf)) {
|
||||
p->has_proc_file = 0;
|
||||
p->thread_collecting &= ~(1<<EBPF_OPTION_ALL_CHARTS);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
|
||||
ff = procfile_open(filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
|
||||
if (unlikely(!ff))
|
||||
return 0;
|
||||
goto cleanup_pid_stat;
|
||||
|
||||
if (unlikely(set_quotes))
|
||||
procfile_set_open_close(ff, "(", ")");
|
||||
procfile_set_open_close(ff, "(", ")");
|
||||
|
||||
ff = procfile_readall(ff);
|
||||
if (unlikely(!ff))
|
||||
return 0;
|
||||
|
||||
p->last_stat_collected_usec = p->stat_collected_usec;
|
||||
p->stat_collected_usec = now_monotonic_usec();
|
||||
calls_counter++;
|
||||
goto cleanup_pid_stat;
|
||||
|
||||
char *comm = procfile_lineword(ff, 0, 1);
|
||||
p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
|
||||
int32_t ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
|
||||
|
||||
if (p->ppid == ppid && p->target)
|
||||
goto without_cmdline_target;
|
||||
|
||||
p->ppid = ppid;
|
||||
|
||||
char cmdline[MAX_CMDLINE + 1];
|
||||
p->cmdline = cmdline;
|
||||
read_proc_pid_cmdline(p, cmdline);
|
||||
if (strcmp(p->comm, comm) != 0) {
|
||||
if (unlikely(debug_enabled)) {
|
||||
if (p->comm[0])
|
||||
|
@ -634,58 +505,50 @@ static inline int read_proc_pid_stat(struct ebpf_pid_stat *p, void *ptr)
|
|||
}
|
||||
|
||||
strncpyz(p->comm, comm, EBPF_MAX_COMPARE_NAME);
|
||||
|
||||
// /proc/<pid>/cmdline
|
||||
if (likely(proc_pid_cmdline_is_needed))
|
||||
managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
|
||||
|
||||
assign_target_to_pid(p);
|
||||
}
|
||||
if (!p->target)
|
||||
assign_target_to_pid(p);
|
||||
|
||||
p->cmdline = NULL;
|
||||
|
||||
if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
|
||||
debug_log_int(
|
||||
"READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu)",
|
||||
netdata_configured_host_prefix, p->pid, p->comm, (p->target) ? p->target->name : "UNSET",
|
||||
p->stat_collected_usec - p->last_stat_collected_usec);
|
||||
"READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s'",
|
||||
netdata_configured_host_prefix, p->pid, p->comm, (p->target) ? p->target->name : "UNSET");
|
||||
|
||||
return 1;
|
||||
without_cmdline_target:
|
||||
p->has_proc_file = 1;
|
||||
p->not_updated = 0;
|
||||
ret = 1;
|
||||
cleanup_pid_stat:
|
||||
procfile_close(ff);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect data for PID
|
||||
*
|
||||
* @param pid the current pid that we are working
|
||||
* @param ptr a NULL value
|
||||
*
|
||||
* @return It returns 1 on success and 0 otherwise
|
||||
*/
|
||||
static inline int ebpf_collect_data_for_pid(pid_t pid, void *ptr)
|
||||
static inline int ebpf_collect_data_for_pid(pid_t pid)
|
||||
{
|
||||
if (unlikely(pid < 0 || pid > pid_max)) {
|
||||
netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ebpf_pid_stat_t *p = ebpf_get_pid_entry(pid, 0);
|
||||
if (unlikely(!p || p->read))
|
||||
return 0;
|
||||
p->read = 1;
|
||||
|
||||
if (unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
|
||||
// there is no reason to proceed if we cannot get its status
|
||||
return 0;
|
||||
ebpf_pid_data_t *p = ebpf_get_pid_data((uint32_t)pid, 0, NULL, EBPF_OPTION_ALL_CHARTS);
|
||||
read_proc_pid_stat(p);
|
||||
|
||||
// check its parent pid
|
||||
if (unlikely(p->ppid < 0 || p->ppid > pid_max)) {
|
||||
netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
|
||||
if (unlikely( p->ppid > pid_max)) {
|
||||
netdata_log_error("Pid %d (command '%s') states invalid parent pid %u. Using 0.", pid, p->comm, p->ppid);
|
||||
p->ppid = 0;
|
||||
}
|
||||
|
||||
// mark it as updated
|
||||
p->updated = 1;
|
||||
p->keep = 0;
|
||||
p->keeploops = 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -694,14 +557,13 @@ static inline int ebpf_collect_data_for_pid(pid_t pid, void *ptr)
|
|||
*/
|
||||
static inline void link_all_processes_to_their_parents(void)
|
||||
{
|
||||
struct ebpf_pid_stat *p, *pp;
|
||||
ebpf_pid_data_t *p, *pp;
|
||||
|
||||
// link all children to their parents
|
||||
// and update children count on parents
|
||||
for (p = ebpf_root_of_pids; p; p = p->next) {
|
||||
for (p = ebpf_pids_link_list; p; p = p->next) {
|
||||
// for each process found
|
||||
|
||||
p->sortlist = 0;
|
||||
p->parent = NULL;
|
||||
|
||||
if (unlikely(!p->ppid)) {
|
||||
|
@ -709,16 +571,15 @@ static inline void link_all_processes_to_their_parents(void)
|
|||
continue;
|
||||
}
|
||||
|
||||
pp = ebpf_all_pids[p->ppid];
|
||||
if (likely(pp)) {
|
||||
pp = &ebpf_pids[p->ppid];
|
||||
if (likely(pp->pid)) {
|
||||
p->parent = pp;
|
||||
pp->children_count++;
|
||||
|
||||
if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
|
||||
debug_log_int(
|
||||
"child %d (%s, %s) on target '%s' has parent %d (%s, %s).", p->pid, p->comm,
|
||||
p->updated ? "running" : "exited", (p->target) ? p->target->name : "UNSET", pp->pid, pp->comm,
|
||||
pp->updated ? "running" : "exited");
|
||||
"child %d (%s) on target '%s' has parent %d (%s).", p->pid, p->comm,
|
||||
(p->target) ? p->target->name : "UNSET", pp->pid, pp->comm);
|
||||
} else {
|
||||
p->parent = NULL;
|
||||
debug_log("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid);
|
||||
|
@ -731,7 +592,7 @@ static inline void link_all_processes_to_their_parents(void)
|
|||
*/
|
||||
static void apply_apps_groups_targets_inheritance(void)
|
||||
{
|
||||
struct ebpf_pid_stat *p = NULL;
|
||||
struct ebpf_pid_data *p = NULL;
|
||||
|
||||
// children that do not have a target
|
||||
// inherit their target from their parent
|
||||
|
@ -740,7 +601,7 @@ static void apply_apps_groups_targets_inheritance(void)
|
|||
if (unlikely(debug_enabled))
|
||||
loops++;
|
||||
found = 0;
|
||||
for (p = ebpf_root_of_pids; p; p = p->next) {
|
||||
for (p = ebpf_pids_link_list; p; p = p->next) {
|
||||
// if this process does not have a target
|
||||
// and it has a parent
|
||||
// and its parent has a target
|
||||
|
@ -751,7 +612,7 @@ static void apply_apps_groups_targets_inheritance(void)
|
|||
|
||||
if (debug_enabled || (p->target && p->target->debug_enabled))
|
||||
debug_log_int(
|
||||
"TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name,
|
||||
"TARGET INHERITANCE: %s is inherited by %u (%s) from its parent %d (%s).", p->target->name,
|
||||
p->pid, p->comm, p->parent->pid, p->parent->comm);
|
||||
}
|
||||
}
|
||||
|
@ -766,7 +627,7 @@ static void apply_apps_groups_targets_inheritance(void)
|
|||
loops++;
|
||||
found = 0;
|
||||
|
||||
for (p = ebpf_root_of_pids; p; p = p->next) {
|
||||
for (p = ebpf_pids_link_list; p; p = p->next) {
|
||||
if (unlikely(!p->sortlist && !p->children_count))
|
||||
p->sortlist = sortlist++;
|
||||
|
||||
|
@ -802,17 +663,15 @@ static void apply_apps_groups_targets_inheritance(void)
|
|||
}
|
||||
|
||||
// init goes always to default target
|
||||
if (ebpf_all_pids[INIT_PID])
|
||||
ebpf_all_pids[INIT_PID]->target = apps_groups_default_target;
|
||||
ebpf_pids[INIT_PID].target = apps_groups_default_target;
|
||||
|
||||
// pid 0 goes always to default target
|
||||
if (ebpf_all_pids[0])
|
||||
ebpf_all_pids[0]->target = apps_groups_default_target;
|
||||
ebpf_pids[0].target = apps_groups_default_target;
|
||||
|
||||
// give a default target on all top level processes
|
||||
if (unlikely(debug_enabled))
|
||||
loops++;
|
||||
for (p = ebpf_root_of_pids; p; p = p->next) {
|
||||
for (p = ebpf_pids_link_list; p; p = p->next) {
|
||||
// if the process is not merged itself
|
||||
// then is is a top level process
|
||||
if (unlikely(!p->merged && !p->target))
|
||||
|
@ -823,8 +682,7 @@ static void apply_apps_groups_targets_inheritance(void)
|
|||
p->sortlist = sortlist++;
|
||||
}
|
||||
|
||||
if (ebpf_all_pids[1])
|
||||
ebpf_all_pids[1]->sortlist = sortlist++;
|
||||
ebpf_pids[1].sortlist = sortlist++;
|
||||
|
||||
// give a target to all merged child processes
|
||||
found = 1;
|
||||
|
@ -832,7 +690,7 @@ static void apply_apps_groups_targets_inheritance(void)
|
|||
if (unlikely(debug_enabled))
|
||||
loops++;
|
||||
found = 0;
|
||||
for (p = ebpf_root_of_pids; p; p = p->next) {
|
||||
for (p = ebpf_pids_link_list; p; p = p->next) {
|
||||
if (unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
|
||||
p->target = p->parent->target;
|
||||
found++;
|
||||
|
@ -872,29 +730,21 @@ static inline void post_aggregate_targets(struct ebpf_target *root)
|
|||
*
|
||||
* @param pid the PID that will be removed.
|
||||
*/
|
||||
static inline void ebpf_del_pid_entry(pid_t pid)
|
||||
void ebpf_del_pid_entry(pid_t pid)
|
||||
{
|
||||
struct ebpf_pid_stat *p = ebpf_all_pids[pid];
|
||||
|
||||
if (unlikely(!p)) {
|
||||
netdata_log_error("attempted to free pid %d that is not allocated.", pid);
|
||||
return;
|
||||
}
|
||||
ebpf_pid_data_t *p = &ebpf_pids[pid];
|
||||
|
||||
debug_log("process %d %s exited, deleting it.", pid, p->comm);
|
||||
|
||||
if (ebpf_root_of_pids == p)
|
||||
ebpf_root_of_pids = p->next;
|
||||
if (ebpf_pids_link_list == p)
|
||||
ebpf_pids_link_list = p->next;
|
||||
|
||||
if (p->next)
|
||||
p->next->prev = p->prev;
|
||||
if (p->prev)
|
||||
p->prev->next = p->next;
|
||||
|
||||
freez(p->stat_filename);
|
||||
freez(p->status_filename);
|
||||
freez(p->io_filename);
|
||||
freez(p->cmdline_filename);
|
||||
memset(p, 0, sizeof(ebpf_pid_data_t));
|
||||
|
||||
rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
|
||||
netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, p->pid);
|
||||
|
@ -914,57 +764,20 @@ static inline void ebpf_del_pid_entry(pid_t pid)
|
|||
}
|
||||
rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
|
||||
|
||||
freez(p->cmdline);
|
||||
ebpf_pid_stat_release(p);
|
||||
|
||||
ebpf_all_pids[pid] = NULL;
|
||||
ebpf_all_pids_count--;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get command string associated with a PID.
|
||||
* This can only safely be used when holding the `collect_data_mutex` lock.
|
||||
*
|
||||
* @param pid the pid to search the data.
|
||||
* @param n the maximum amount of bytes to copy into dest.
|
||||
* if this is greater than the size of the command, it is clipped.
|
||||
* @param dest the target memory buffer to write the command into.
|
||||
* @return -1 if the PID hasn't been scraped yet, 0 otherwise.
|
||||
*/
|
||||
int get_pid_comm(pid_t pid, size_t n, char *dest)
|
||||
{
|
||||
struct ebpf_pid_stat *stat;
|
||||
|
||||
stat = ebpf_all_pids[pid];
|
||||
if (unlikely(stat == NULL)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (unlikely(n > sizeof(stat->comm))) {
|
||||
n = sizeof(stat->comm);
|
||||
}
|
||||
|
||||
strncpyz(dest, stat->comm, n);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove PIDs when they are not running more.
|
||||
*/
|
||||
void ebpf_cleanup_exited_pids(int max)
|
||||
static void ebpf_cleanup_exited_pids()
|
||||
{
|
||||
struct ebpf_pid_stat *p = NULL;
|
||||
|
||||
for (p = ebpf_root_of_pids; p;) {
|
||||
if (p->not_updated > max) {
|
||||
if (unlikely(debug_enabled && (p->keep || p->keeploops)))
|
||||
debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
|
||||
|
||||
pid_t r = p->pid;
|
||||
p = p->next;
|
||||
|
||||
ebpf_del_pid_entry(r);
|
||||
ebpf_pid_data_t *p = NULL;
|
||||
for (p = ebpf_pids_link_list; p;) {
|
||||
if (!p->has_proc_file) {
|
||||
ebpf_release_pid_data(p, 0, p->pid, EBPF_OPTION_ALL_CHARTS);
|
||||
}
|
||||
|
||||
p = p->next;
|
||||
}
|
||||
}
|
||||
|
@ -974,14 +787,14 @@ void ebpf_cleanup_exited_pids(int max)
|
|||
*
|
||||
* @return It returns 0 on success and -1 otherwise.
|
||||
*/
|
||||
static inline void read_proc_filesystem()
|
||||
static int ebpf_read_proc_filesystem()
|
||||
{
|
||||
char dirname[FILENAME_MAX + 1];
|
||||
|
||||
snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
|
||||
DIR *dir = opendir(dirname);
|
||||
if (!dir)
|
||||
return;
|
||||
return -1;
|
||||
|
||||
struct dirent *de = NULL;
|
||||
|
||||
|
@ -997,9 +810,11 @@ static inline void read_proc_filesystem()
|
|||
if (unlikely(endptr == de->d_name || *endptr != '\0'))
|
||||
continue;
|
||||
|
||||
ebpf_collect_data_for_pid(pid, NULL);
|
||||
ebpf_collect_data_for_pid(pid);
|
||||
}
|
||||
closedir(dir);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1009,17 +824,17 @@ static inline void read_proc_filesystem()
|
|||
* @param p the pid with information to update
|
||||
* @param o never used
|
||||
*/
|
||||
static inline void aggregate_pid_on_target(struct ebpf_target *w, struct ebpf_pid_stat *p, struct ebpf_target *o)
|
||||
static inline void aggregate_pid_on_target(struct ebpf_target *w, ebpf_pid_data_t *p, struct ebpf_target *o)
|
||||
{
|
||||
UNUSED(o);
|
||||
|
||||
if (unlikely(!p->updated)) {
|
||||
if (unlikely(!p->has_proc_file)) {
|
||||
// the process is not running
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(!w)) {
|
||||
netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm);
|
||||
netdata_log_error("pid %u %s was left without a target!", p->pid, p->comm);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1061,19 +876,18 @@ void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core)
|
|||
void ebpf_process_sum_values_for_pids(ebpf_process_stat_t *process, struct ebpf_pid_on_target *root)
|
||||
{
|
||||
memset(process, 0, sizeof(ebpf_process_stat_t));
|
||||
while (root) {
|
||||
for (; root; root = root->next) {
|
||||
int32_t pid = root->pid;
|
||||
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
|
||||
if (local_pid) {
|
||||
ebpf_process_stat_t *in = &local_pid->process;
|
||||
process->task_err += in->task_err;
|
||||
process->release_call += in->release_call;
|
||||
process->exit_call += in->exit_call;
|
||||
process->create_thread += in->create_thread;
|
||||
process->create_process += in->create_process;
|
||||
}
|
||||
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_PROCESS_IDX);
|
||||
ebpf_publish_process_t *in = local_pid->process;
|
||||
if (!in)
|
||||
continue;
|
||||
|
||||
root = root->next;
|
||||
process->task_err += in->task_err;
|
||||
process->release_call += in->release_call;
|
||||
process->exit_call += in->exit_call;
|
||||
process->create_thread += in->create_thread;
|
||||
process->create_process += in->create_process;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1085,51 +899,38 @@ void ebpf_process_sum_values_for_pids(ebpf_process_stat_t *process, struct ebpf_
|
|||
*
|
||||
* @param tbl_pid_stats_fd The mapped file descriptor for the hash table.
|
||||
* @param maps_per_core do I have hash maps per core?
|
||||
* @param max_period max period to wait before remove from hash table.
|
||||
*/
|
||||
void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core)
|
||||
void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period)
|
||||
{
|
||||
if (unlikely(!ebpf_all_pids))
|
||||
if (tbl_pid_stats_fd == -1)
|
||||
return;
|
||||
|
||||
struct ebpf_pid_stat *pids = ebpf_root_of_pids; // global list of all processes running
|
||||
while (pids) {
|
||||
if (pids->updated_twice) {
|
||||
pids->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
|
||||
pids->updated = 0;
|
||||
pids->merged = 0;
|
||||
pids->children_count = 0;
|
||||
pids->parent = NULL;
|
||||
} else {
|
||||
if (pids->updated)
|
||||
pids->updated_twice = 1;
|
||||
}
|
||||
|
||||
pids = pids->next;
|
||||
}
|
||||
|
||||
read_proc_filesystem();
|
||||
|
||||
pids = ebpf_root_of_pids; // global list of all processes running
|
||||
size_t length = sizeof(ebpf_process_stat_t);
|
||||
if (maps_per_core)
|
||||
length *= ebpf_nprocs;
|
||||
|
||||
if (tbl_pid_stats_fd != -1) {
|
||||
size_t length = sizeof(ebpf_process_stat_t);
|
||||
if (maps_per_core)
|
||||
length *= ebpf_nprocs;
|
||||
|
||||
uint32_t key = 0, next_key = 0;
|
||||
while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
|
||||
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, 0);
|
||||
if (!local_pid)
|
||||
goto end_process_loop;
|
||||
|
||||
ebpf_process_stat_t *w = &local_pid->process;
|
||||
if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, process_stat_vector)) {
|
||||
goto end_process_loop;
|
||||
}
|
||||
|
||||
ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
|
||||
|
||||
memcpy(w, process_stat_vector, sizeof(ebpf_process_stat_t));
|
||||
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(key, 0, NULL, EBPF_MODULE_PROCESS_IDX);
|
||||
ebpf_publish_process_t *w = local_pid->process;
|
||||
if (!w)
|
||||
local_pid->process = w = ebpf_process_allocate_publish();
|
||||
|
||||
w->create_thread = process_stat_vector[0].create_thread;
|
||||
w->exit_call = process_stat_vector[0].exit_call;
|
||||
w->create_thread = process_stat_vector[0].create_thread;
|
||||
w->create_process = process_stat_vector[0].create_process;
|
||||
w->release_call = process_stat_vector[0].release_call;
|
||||
w->task_err = process_stat_vector[0].task_err;
|
||||
|
||||
end_process_loop:
|
||||
memset(process_stat_vector, 0, length);
|
||||
|
@ -1137,19 +938,6 @@ end_process_loop:
|
|||
}
|
||||
}
|
||||
|
||||
link_all_processes_to_their_parents();
|
||||
|
||||
apply_apps_groups_targets_inheritance();
|
||||
|
||||
apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
|
||||
|
||||
// this has to be done, before the cleanup
|
||||
// // concentrate everything on the targets
|
||||
for (pids = ebpf_root_of_pids; pids; pids = pids->next)
|
||||
aggregate_pid_on_target(pids->target, pids, NULL);
|
||||
|
||||
post_aggregate_targets(apps_groups_root_target);
|
||||
|
||||
struct ebpf_target *w;
|
||||
for (w = apps_groups_root_target; w; w = w->next) {
|
||||
if (unlikely(!(w->processes)))
|
||||
|
@ -1157,4 +945,34 @@ end_process_loop:
|
|||
|
||||
ebpf_process_sum_values_for_pids(&w->process, w->root_pid);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
void ebpf_parse_proc_files()
|
||||
{
|
||||
ebpf_pid_data_t *pids = ebpf_pids_link_list;
|
||||
while (pids) {
|
||||
pids->not_updated = EBPF_CLEANUP_FACTOR;
|
||||
pids->merged = 0;
|
||||
pids->children_count = 0;
|
||||
|
||||
pids = pids->next;
|
||||
}
|
||||
|
||||
if (ebpf_read_proc_filesystem())
|
||||
return;
|
||||
|
||||
link_all_processes_to_their_parents();
|
||||
|
||||
apply_apps_groups_targets_inheritance();
|
||||
|
||||
apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
|
||||
|
||||
for (pids = ebpf_pids_link_list; pids; pids = pids->next)
|
||||
aggregate_pid_on_target(pids->target, pids, NULL);
|
||||
|
||||
ebpf_cleanup_exited_pids();
|
||||
}
|
||||
|
|
|
@ -39,10 +39,39 @@
|
|||
#include "ebpf_swap.h"
|
||||
#include "ebpf_vfs.h"
|
||||
|
||||
#define EBPF_MAX_COMPARE_NAME 100
|
||||
#define EBPF_MAX_COMPARE_NAME 95
|
||||
#define EBPF_MAX_NAME 100
|
||||
|
||||
#define EBPF_CLEANUP_FACTOR 10
|
||||
#define EBPF_CLEANUP_FACTOR 2
|
||||
|
||||
enum ebpf_main_index {
|
||||
EBPF_MODULE_PROCESS_IDX,
|
||||
EBPF_MODULE_SOCKET_IDX,
|
||||
EBPF_MODULE_CACHESTAT_IDX,
|
||||
EBPF_MODULE_SYNC_IDX,
|
||||
EBPF_MODULE_DCSTAT_IDX,
|
||||
EBPF_MODULE_SWAP_IDX,
|
||||
EBPF_MODULE_VFS_IDX,
|
||||
EBPF_MODULE_FILESYSTEM_IDX,
|
||||
EBPF_MODULE_DISK_IDX,
|
||||
EBPF_MODULE_MOUNT_IDX,
|
||||
EBPF_MODULE_FD_IDX,
|
||||
EBPF_MODULE_HARDIRQ_IDX,
|
||||
EBPF_MODULE_SOFTIRQ_IDX,
|
||||
EBPF_MODULE_OOMKILL_IDX,
|
||||
EBPF_MODULE_SHM_IDX,
|
||||
EBPF_MODULE_MDFLUSH_IDX,
|
||||
EBPF_MODULE_FUNCTION_IDX,
|
||||
/* THREADS MUST BE INCLUDED BEFORE THIS COMMENT */
|
||||
EBPF_OPTION_ALL_CHARTS,
|
||||
EBPF_OPTION_VERSION,
|
||||
EBPF_OPTION_HELP,
|
||||
EBPF_OPTION_GLOBAL_CHART,
|
||||
EBPF_OPTION_RETURN_MODE,
|
||||
EBPF_OPTION_LEGACY,
|
||||
EBPF_OPTION_CORE,
|
||||
EBPF_OPTION_UNITTEST
|
||||
};
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Structures used to read information from kernel ring
|
||||
|
@ -63,10 +92,21 @@ typedef struct ebpf_process_stat {
|
|||
|
||||
//Counter
|
||||
uint32_t task_err;
|
||||
|
||||
uint8_t removeme;
|
||||
} ebpf_process_stat_t;
|
||||
|
||||
typedef struct __attribute__((packed)) ebpf_publish_process {
|
||||
uint64_t ct;
|
||||
|
||||
//Counter
|
||||
uint32_t exit_call;
|
||||
uint32_t release_call;
|
||||
uint32_t create_process;
|
||||
uint32_t create_thread;
|
||||
|
||||
//Counter
|
||||
uint32_t task_err;
|
||||
} ebpf_publish_process_t;
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// pid_stat
|
||||
//
|
||||
|
@ -108,21 +148,198 @@ struct ebpf_target {
|
|||
struct ebpf_target *target; // the one that will be reported to netdata
|
||||
struct ebpf_target *next;
|
||||
};
|
||||
|
||||
extern struct ebpf_target *apps_groups_default_target;
|
||||
extern struct ebpf_target *apps_groups_root_target;
|
||||
extern struct ebpf_target *users_root_target;
|
||||
extern struct ebpf_target *groups_root_target;
|
||||
extern uint64_t collect_pids;
|
||||
|
||||
typedef struct __attribute__((packed)) ebpf_pid_data {
|
||||
uint32_t pid;
|
||||
uint32_t ppid;
|
||||
uint64_t thread_collecting;
|
||||
|
||||
char comm[EBPF_MAX_COMPARE_NAME + 1];
|
||||
char *cmdline;
|
||||
|
||||
uint32_t has_proc_file;
|
||||
uint32_t not_updated;
|
||||
int children_count; // number of processes directly referencing this
|
||||
int merged;
|
||||
int sortlist; // higher numbers = top on the process tree
|
||||
|
||||
struct ebpf_target *target; // the one that will be reported to netdata
|
||||
struct ebpf_pid_data *parent;
|
||||
struct ebpf_pid_data *prev;
|
||||
struct ebpf_pid_data *next;
|
||||
|
||||
netdata_publish_fd_stat_t *fd;
|
||||
netdata_publish_swap_t *swap;
|
||||
netdata_publish_shm_t *shm; // this has a leak issue
|
||||
netdata_publish_dcstat_t *dc;
|
||||
netdata_publish_vfs_t *vfs;
|
||||
netdata_publish_cachestat_t *cachestat;
|
||||
ebpf_publish_process_t *process;
|
||||
ebpf_socket_publish_apps_t *socket;
|
||||
|
||||
} ebpf_pid_data_t;
|
||||
|
||||
extern ebpf_pid_data_t *ebpf_pids;
|
||||
extern ebpf_pid_data_t *ebpf_pids_link_list;
|
||||
extern size_t ebpf_all_pids_count;
|
||||
extern size_t ebpf_hash_table_pids_count;
|
||||
void ebpf_del_pid_entry(pid_t pid);
|
||||
|
||||
static inline void *ebpf_cachestat_allocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(netdata_publish_cachestat_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_cachestat_release_publish(netdata_publish_cachestat_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_dcallocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(netdata_publish_dcstat_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_dc_release_publish(netdata_publish_dcstat_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_fd_allocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(netdata_publish_fd_stat_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_fd_release_publish(netdata_publish_fd_stat_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_shm_allocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(netdata_publish_shm_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_shm_release_publish(netdata_publish_shm_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_socket_allocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(ebpf_socket_publish_apps_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_socket_release_publish(ebpf_socket_publish_apps_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_swap_allocate_publish_swap()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(netdata_publish_swap_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_release_publish_swap(netdata_publish_swap_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_vfs_allocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(netdata_publish_vfs_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_vfs_release_publish(netdata_publish_vfs_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline void *ebpf_process_allocate_publish()
|
||||
{
|
||||
ebpf_hash_table_pids_count++;
|
||||
return callocz(1, sizeof(ebpf_publish_process_t));
|
||||
}
|
||||
|
||||
static inline void ebpf_process_release_publish(ebpf_publish_process_t *ptr)
|
||||
{
|
||||
ebpf_hash_table_pids_count--;
|
||||
freez(ptr);
|
||||
}
|
||||
|
||||
static inline ebpf_pid_data_t *ebpf_get_pid_data(uint32_t pid, uint32_t tgid, char *name, uint32_t idx) {
|
||||
// To add pids to target here will do host very slow
|
||||
|
||||
ebpf_pid_data_t *ptr = &ebpf_pids[pid];
|
||||
// The caller is getting data to work.
|
||||
if (!name && idx != EBPF_OPTION_ALL_CHARTS)
|
||||
return ptr;
|
||||
|
||||
ptr->thread_collecting |= 1<<idx;
|
||||
if (ptr->pid == pid) {
|
||||
return ptr;
|
||||
}
|
||||
|
||||
ptr->pid = pid;
|
||||
ptr->ppid = tgid;
|
||||
|
||||
if (name)
|
||||
strncpyz(ptr->comm, name, EBPF_MAX_COMPARE_NAME);
|
||||
|
||||
if (idx == EBPF_OPTION_ALL_CHARTS) {
|
||||
// We are going to use only with pids listed in /proc, other PIDs are associated to it
|
||||
if (likely(ebpf_pids_link_list))
|
||||
ebpf_pids_link_list->prev = ptr;
|
||||
|
||||
ptr->next = ebpf_pids_link_list;
|
||||
ebpf_pids_link_list = ptr;
|
||||
}
|
||||
|
||||
ebpf_all_pids_count++;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static inline void ebpf_release_pid_data(ebpf_pid_data_t *eps, int fd, uint32_t key, uint32_t idx)
|
||||
{
|
||||
if (fd) {
|
||||
bpf_map_delete_elem(fd, &key);
|
||||
}
|
||||
eps->thread_collecting &= ~(1<<idx);
|
||||
if (!eps->thread_collecting && !eps->has_proc_file) {
|
||||
ebpf_del_pid_entry((pid_t)key);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct ebpf_pid_stat {
|
||||
int32_t pid;
|
||||
uint32_t pid;
|
||||
uint64_t thread_collecting;
|
||||
char comm[EBPF_MAX_COMPARE_NAME + 1];
|
||||
char *cmdline;
|
||||
|
||||
uint32_t log_thrown;
|
||||
|
||||
// char state;
|
||||
int32_t ppid;
|
||||
uint32_t ppid;
|
||||
|
||||
int children_count; // number of processes directly referencing this
|
||||
unsigned char keep : 1; // 1 when we need to keep this process in memory even after it exited
|
||||
|
@ -199,8 +416,6 @@ static inline void debug_log_int(const char *fmt, ...)
|
|||
// ----------------------------------------------------------------------------
|
||||
// Exported variabled and functions
|
||||
//
|
||||
extern struct ebpf_pid_stat **ebpf_all_pids;
|
||||
|
||||
int ebpf_read_apps_groups_conf(struct ebpf_target **apps_groups_default_target,
|
||||
struct ebpf_target **apps_groups_root_target,
|
||||
const char *path,
|
||||
|
@ -216,7 +431,7 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
|
|||
|
||||
int get_pid_comm(pid_t pid, size_t n, char *dest);
|
||||
|
||||
void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core);
|
||||
void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period);
|
||||
void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core);
|
||||
|
||||
// The default value is at least 32 times smaller than maximum number of PIDs allowed on system,
|
||||
|
@ -227,8 +442,7 @@ void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core);
|
|||
#define NETDATA_EBPF_ALLOC_MIN_ELEMENTS 256
|
||||
|
||||
// ARAL Sectiion
|
||||
extern void ebpf_aral_init(void);
|
||||
extern ebpf_pid_stat_t *ebpf_get_pid_entry(pid_t pid, pid_t tgid);
|
||||
void ebpf_aral_init(void);
|
||||
extern ebpf_process_stat_t *process_stat_vector;
|
||||
|
||||
extern ARAL *ebpf_aral_vfs_pid;
|
||||
|
@ -240,7 +454,7 @@ extern ARAL *ebpf_aral_shm_pid;
|
|||
void ebpf_shm_aral_init();
|
||||
netdata_publish_shm_t *ebpf_shm_stat_get(void);
|
||||
void ebpf_shm_release(netdata_publish_shm_t *stat);
|
||||
void ebpf_cleanup_exited_pids(int max);
|
||||
void ebpf_parse_proc_files();
|
||||
|
||||
// ARAL Section end
|
||||
|
||||
|
|
|
@ -528,6 +528,10 @@ static void ebpf_cachestat_exit(void *pptr)
|
|||
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
|
||||
if(!em) return;
|
||||
|
||||
pthread_mutex_lock(&lock);
|
||||
collect_pids &= ~(1<<EBPF_MODULE_CACHESTAT_IDX);
|
||||
pthread_mutex_unlock(&lock);
|
||||
|
||||
if (ebpf_read_cachestat.thread)
|
||||
nd_thread_signal_cancel(ebpf_read_cachestat.thread);
|
||||
|
||||
|
@ -677,6 +681,9 @@ static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_pe
|
|||
total->mark_page_accessed += w->mark_page_accessed;
|
||||
if (w->ct > ct)
|
||||
ct = w->ct;
|
||||
|
||||
if (!total->name[0] && w->name[0])
|
||||
strncpyz(total->name, w->name, sizeof(total->name) - 1);
|
||||
}
|
||||
total->ct = ct;
|
||||
}
|
||||
|
@ -692,13 +699,14 @@ static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_pe
static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, netdata_cachestat_pid_t *in)
{
out->ct = in->ct;
if (!out->current.mark_page_accessed) {
memcpy(&out->current, &in[0], sizeof(netdata_cachestat_pid_t));
return;
if (out->current.mark_page_accessed) {
memcpy(&out->prev, &out->current, sizeof(netdata_cachestat_t));
}

memcpy(&out->prev, &out->current, sizeof(netdata_cachestat_pid_t));
memcpy(&out->current, &in[0], sizeof(netdata_cachestat_pid_t));
out->current.account_page_dirtied = in[0].account_page_dirtied;
out->current.add_to_page_cache_lru = in[0].add_to_page_cache_lru;
out->current.mark_buffer_dirty = in[0].mark_buffer_dirty;
out->current.mark_page_accessed = in[0].mark_page_accessed;
}

/**

@ -707,8 +715,9 @@ static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, n
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
* @param max_period limit of iterations without updates before remove data from hash table
*/
static void ebpf_read_cachestat_apps_table(int maps_per_core, int max_period)
static void ebpf_read_cachestat_apps_table(int maps_per_core, uint32_t max_period)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;

@ -724,17 +733,18 @@ static void ebpf_read_cachestat_apps_table(int maps_per_core, int max_period)
cachestat_apps_accumulator(cv, maps_per_core);

ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, cv->tgid);
if (!local_pid)
goto end_cachestat_loop;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(key, cv->tgid, cv->name, EBPF_MODULE_CACHESTAT_IDX);
netdata_publish_cachestat_t *publish = local_pid->cachestat;
if (!publish)
local_pid->cachestat = publish = ebpf_cachestat_allocate_publish();

netdata_publish_cachestat_t *publish = &local_pid->cachestat;
if (!publish->ct || publish->ct != cv->ct){
cachestat_save_pid_values(publish, cv);
local_pid->not_updated = 0;
} else if (++local_pid->not_updated >= max_period) {
bpf_map_delete_elem(fd, &key);
local_pid->not_updated = 0;
ebpf_release_pid_data(local_pid, fd, key, EBPF_MODULE_CACHESTAT_IDX);
ebpf_cachestat_release_publish(publish);
local_pid->cachestat = NULL;
}

end_cachestat_loop:

@ -759,13 +769,14 @@ static void ebpf_update_cachestat_cgroup()
struct pid_on_target2 *pids;
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_cachestat_pid_t *out = &pids->cachestat;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_cachestat_t *in = &local_pid->cachestat;
netdata_publish_cachestat_t *out = &pids->cachestat;

memcpy(out, &in->current, sizeof(netdata_cachestat_pid_t));
}
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_CACHESTAT_IDX);
netdata_publish_cachestat_t *in = local_pid->cachestat;
if (!in)
continue;

memcpy(&out->current, &in->current, sizeof(netdata_cachestat_t));
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -784,20 +795,19 @@ void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct ebpf_p
memcpy(&publish->prev, &publish->current,sizeof(publish->current));
memset(&publish->current, 0, sizeof(publish->current));

netdata_cachestat_pid_t *dst = &publish->current;
while (root) {
netdata_cachestat_t *dst = &publish->current;
for (; root; root = root->next) {
int32_t pid = root->pid;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_cachestat_t *w = &local_pid->cachestat;
netdata_cachestat_pid_t *src = &w->current;
dst->account_page_dirtied += src->account_page_dirtied;
dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
dst->mark_buffer_dirty += src->mark_buffer_dirty;
dst->mark_page_accessed += src->mark_page_accessed;
}
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_CACHESTAT_IDX);
netdata_publish_cachestat_t *w = local_pid->cachestat;
if (!w)
continue;

root = root->next;
netdata_cachestat_t *src = &w->current;
dst->account_page_dirtied += src->account_page_dirtied;
dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
dst->mark_buffer_dirty += src->mark_buffer_dirty;
dst->mark_page_accessed += src->mark_page_accessed;
}
}

@ -834,7 +844,7 @@ void *ebpf_read_cachestat_thread(void *ptr)
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
int max_period = update_every * EBPF_CLEANUP_FACTOR;
uint32_t max_period = EBPF_CLEANUP_FACTOR;

int counter = update_every - 1;

@ -1020,8 +1030,8 @@ void ebpf_cache_send_apps_data(struct ebpf_target *root)
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_CACHESTAT_IDX))))
continue;

netdata_cachestat_pid_t *current = &w->cachestat.current;
netdata_cachestat_pid_t *prev = &w->cachestat.prev;
netdata_cachestat_t *current = &w->cachestat.current;
netdata_cachestat_t *prev = &w->cachestat.prev;

uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;

@ -1067,16 +1077,14 @@ void ebpf_cachestat_sum_cgroup_pids(netdata_publish_cachestat_t *publish, struct
memcpy(&publish->prev, &publish->current,sizeof(publish->current));
memset(&publish->current, 0, sizeof(publish->current));

netdata_cachestat_pid_t *dst = &publish->current;
while (root) {
netdata_cachestat_pid_t *src = &root->cachestat;
netdata_cachestat_t *dst = &publish->current;
for (; root; root = root->next) {
netdata_cachestat_t *src = &root->cachestat.current;

dst->account_page_dirtied += src->account_page_dirtied;
dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
dst->mark_buffer_dirty += src->mark_buffer_dirty;
dst->mark_page_accessed += src->mark_page_accessed;

root = root->next;
}
}

@ -1091,8 +1099,8 @@ void ebpf_cachestat_calc_chart_values()
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_cachestat_sum_cgroup_pids(&ect->publish_cachestat, ect->pids);

netdata_cachestat_pid_t *current = &ect->publish_cachestat.current;
netdata_cachestat_pid_t *prev = &ect->publish_cachestat.prev;
netdata_cachestat_t *current = &ect->publish_cachestat.current;
netdata_cachestat_t *prev = &ect->publish_cachestat.prev;

uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
@ -69,20 +69,27 @@ enum cachestat_tables {
NETDATA_CACHESTAT_CTRL
};

typedef struct netdata_publish_cachestat_pid {
typedef struct netdata_cachestat_pid {
uint64_t ct;
uint32_t tgid;
uint32_t uid;
uint32_t gid;
char name[TASK_COMM_LEN];

uint64_t add_to_page_cache_lru;
uint64_t mark_page_accessed;
uint64_t account_page_dirtied;
uint64_t mark_buffer_dirty;
uint32_t add_to_page_cache_lru;
uint32_t mark_page_accessed;
uint32_t account_page_dirtied;
uint32_t mark_buffer_dirty;
} netdata_cachestat_pid_t;

typedef struct netdata_publish_cachestat {
typedef struct __attribute__((packed)) netdata_cachestat {
uint32_t add_to_page_cache_lru;
uint32_t mark_page_accessed;
uint32_t account_page_dirtied;
uint32_t mark_buffer_dirty;
} netdata_cachestat_t;

typedef struct __attribute__((packed)) netdata_publish_cachestat {
uint64_t ct;

long long ratio;

@ -90,8 +97,8 @@ typedef struct netdata_publish_cachestat {
long long hit;
long long miss;

netdata_cachestat_pid_t current;
netdata_cachestat_pid_t prev;
netdata_cachestat_t current;
netdata_cachestat_t prev;
} netdata_publish_cachestat_t;

void *ebpf_cachestat_thread(void *ptr);
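The header change above splits the old per-PID record into a kernel-facing `netdata_cachestat_pid_t` (which still carries `ct`, `tgid`, `uid`, `gid` and the task name) and a packed `netdata_cachestat_t` that keeps only the four counters the publish path needs per PID. A minimal sketch of the memory effect, assuming the layouts shown in this hunk and a 16-byte `TASK_COMM_LEN`; the exact sizes printed depend on the target ABI:

#include <stdint.h>
#include <stdio.h>

#define TASK_COMM_LEN 16  /* assumed value; the real constant comes from the plugin headers */

/* kernel-facing sample, one copy per CPU per PID */
typedef struct netdata_cachestat_pid {
    uint64_t ct;
    uint32_t tgid, uid, gid;
    char name[TASK_COMM_LEN];
    uint32_t add_to_page_cache_lru, mark_page_accessed;
    uint32_t account_page_dirtied, mark_buffer_dirty;
} netdata_cachestat_pid_t;

/* publish-side copy kept per PID by the plugin */
typedef struct __attribute__((packed)) netdata_cachestat {
    uint32_t add_to_page_cache_lru, mark_page_accessed;
    uint32_t account_page_dirtied, mark_buffer_dirty;
} netdata_cachestat_t;

int main(void) {
    printf("kernel-facing sample: %zu bytes, publish copy: %zu bytes\n",
           sizeof(netdata_cachestat_pid_t), sizeof(netdata_cachestat_t));
    return 0;
}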
@ -14,13 +14,13 @@ struct pid_on_target2 {
int updated;

netdata_publish_swap_t swap;
netdata_fd_stat_t fd;
netdata_publish_fd_stat_t fd;
netdata_publish_vfs_t vfs;
ebpf_process_stat_t ps;
ebpf_publish_process_t ps;
netdata_dcstat_pid_t dc;
netdata_publish_shm_t shm;
netdata_socket_t socket;
netdata_cachestat_pid_t cachestat;
netdata_publish_cachestat_t cachestat;

struct pid_on_target2 *next;
};

@ -55,9 +55,9 @@ typedef struct ebpf_cgroup_target {
uint32_t updated;

netdata_publish_swap_t publish_systemd_swap;
netdata_fd_stat_t publish_systemd_fd;
netdata_publish_fd_stat_t publish_systemd_fd;
netdata_publish_vfs_t publish_systemd_vfs;
ebpf_process_stat_t publish_systemd_ps;
ebpf_publish_process_t publish_systemd_ps;
netdata_publish_dcstat_t publish_dc;
int oomkill;
netdata_publish_shm_t publish_shm;
@ -456,6 +456,10 @@ static void ebpf_dcstat_exit(void *pptr)
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
if(!em) return;

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_DCSTAT_IDX);
pthread_mutex_unlock(&lock);

if (ebpf_read_dcstat.thread)
nd_thread_signal_cancel(ebpf_read_dcstat.thread);

@ -524,6 +528,9 @@ static void ebpf_dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per
if (w->ct > ct)
ct = w->ct;

if (!total->name[0] && w->name[0])
strncpyz(total->name, w->name, sizeof(total->name) - 1);
}
total->ct = ct;
}

@ -534,8 +541,9 @@ static void ebpf_dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
* @param max_period limit of iterations without updates before remove data from hash table
*/
static void ebpf_read_dc_apps_table(int maps_per_core, int max_period)
static void ebpf_read_dc_apps_table(int maps_per_core, uint32_t max_period)
{
netdata_dcstat_pid_t *cv = dcstat_vector;
int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;

@ -551,16 +559,22 @@ static void ebpf_read_dc_apps_table(int maps_per_core, int max_period)
ebpf_dcstat_apps_accumulator(cv, maps_per_core);

ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(key, cv->tgid);
if (pid_stat) {
netdata_publish_dcstat_t *publish = &pid_stat->dc;
if (!publish->ct || publish->ct != cv->ct) {
memcpy(&publish->curr, &cv[0], sizeof(netdata_dcstat_pid_t));
pid_stat->not_updated = 0;
} else if (++pid_stat->not_updated >= max_period) {
bpf_map_delete_elem(fd, &key);
pid_stat->not_updated = 0;
}
ebpf_pid_data_t *pid_stat = ebpf_get_pid_data(key, cv->tgid, cv->name, EBPF_MODULE_DCSTAT_IDX);
netdata_publish_dcstat_t *publish = pid_stat->dc;
if (!publish)
pid_stat->dc = publish = ebpf_dcallocate_publish();

if (!publish->ct || publish->ct != cv->ct) {
publish->ct = cv->ct;
publish->curr.not_found = cv[0].not_found;
publish->curr.file_system = cv[0].file_system;
publish->curr.cache_access = cv[0].cache_access;

pid_stat->not_updated = 0;
} else if (++pid_stat->not_updated >= max_period) {
ebpf_release_pid_data(pid_stat, fd, key, EBPF_MODULE_DCSTAT_IDX);
ebpf_dc_release_publish(publish);
pid_stat->dc = NULL;
}

end_dc_loop:

@ -580,20 +594,17 @@ end_dc_loop:
*/
void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct ebpf_pid_on_target *root)
{
memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
netdata_dcstat_pid_t *dst = &publish->curr;
while (root) {
memset(&publish->curr, 0, sizeof(netdata_publish_dcstat_pid_t));
for (; root; root = root->next) {
int32_t pid = root->pid;
ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
if (pid_stat) {
netdata_publish_dcstat_t *w = &pid_stat->dc;
netdata_dcstat_pid_t *src = &w->curr;
dst->cache_access += src->cache_access;
dst->file_system += src->file_system;
dst->not_found += src->not_found;
}
ebpf_pid_data_t *pid_stat = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_DCSTAT_IDX);
netdata_publish_dcstat_t *w = pid_stat->dc;
if (!w)
continue;

root = root->next;
publish->curr.cache_access += w->curr.cache_access;
publish->curr.file_system += w->curr.file_system;
publish->curr.not_found += w->curr.not_found;
}
}

@ -635,13 +646,16 @@ void *ebpf_read_dcstat_thread(void *ptr)
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
int collect_pid = (em->apps_charts || em->cgroup_charts);
if (!collect_pid)
return NULL;

int counter = update_every - 1;

uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
usec_t period = update_every * USEC_PER_SEC;
int max_period = update_every * EBPF_CLEANUP_FACTOR;
uint32_t max_period = EBPF_CLEANUP_FACTOR;
while (!ebpf_plugin_stop() && running_time < lifetime) {
(void)heartbeat_next(&hb, period);
if (ebpf_plugin_stop() || ++counter != update_every)

@ -771,12 +785,12 @@ static void ebpf_update_dc_cgroup()
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_dcstat_pid_t *out = &pids->dc;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_dcstat_t *in = &local_pid->dc;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_DCSTAT_IDX);
netdata_publish_dcstat_t *in = local_pid->dc;
if (!in)
continue;

memcpy(out, &in->curr, sizeof(netdata_dcstat_pid_t));
}
memcpy(out, &in->curr, sizeof(netdata_publish_dcstat_pid_t));
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -1001,13 +1015,12 @@ static void ebpf_obsolete_specific_dc_charts(char *type, int update_every)
void ebpf_dc_sum_cgroup_pids(netdata_publish_dcstat_t *publish, struct pid_on_target2 *root)
{
memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
netdata_dcstat_pid_t *dst = &publish->curr;
while (root) {
netdata_dcstat_pid_t *src = &root->dc;

dst->cache_access += src->cache_access;
dst->file_system += src->file_system;
dst->not_found += src->not_found;
publish->curr.cache_access += src->cache_access;
publish->curr.file_system += src->file_system;
publish->curr.not_found += src->not_found;

root = root->next;
}
@ -3,6 +3,8 @@
#ifndef NETDATA_EBPF_DCSTAT_H
#define NETDATA_EBPF_DCSTAT_H 1

#include "ebpf.h"

// Module name & description
#define NETDATA_EBPF_MODULE_NAME_DCSTAT "dcstat"
#define NETDATA_EBPF_DC_MODULE_DESC "Monitor file access using directory cache. This thread is integrated with apps and cgroup."

@ -69,26 +71,32 @@ enum directory_cache_targets {
NETDATA_DC_TARGET_D_LOOKUP
};

typedef struct netdata_publish_dcstat_pid {
typedef struct __attribute__((packed)) netdata_publish_dcstat_pid {
uint64_t cache_access;
uint32_t file_system;
uint32_t not_found;
} netdata_publish_dcstat_pid_t;

typedef struct netdata_dcstat_pid {
uint64_t ct;
uint32_t tgid;
uint32_t uid;
uint32_t gid;
char name[TASK_COMM_LEN];

uint64_t cache_access;
uint64_t file_system;
uint64_t not_found;
uint32_t cache_access;
uint32_t file_system;
uint32_t not_found;
} netdata_dcstat_pid_t;

typedef struct netdata_publish_dcstat {
typedef struct __attribute__((packed)) netdata_publish_dcstat {
uint64_t ct;

long long ratio;
long long cache_access;

netdata_dcstat_pid_t curr;
netdata_dcstat_pid_t prev;
netdata_publish_dcstat_pid_t curr;
netdata_publish_dcstat_pid_t prev;
} netdata_publish_dcstat_t;

void *ebpf_dcstat_thread(void *ptr);
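The dcstat hunks above (like the matching cachestat, fd, shm, swap and socket ones) stop embedding the publish structure inside the PID entry: a pointer is allocated the first time a PID shows up in the eBPF hash table and released once the entry goes stale. A generic sketch of that pattern, using hypothetical alloc_publish()/release_publish() helpers in place of the module-specific allocators referenced in the diff:

#include <stdlib.h>

typedef struct publish { unsigned long ct, hits; } publish_t;
typedef struct pid_data { publish_t *publish; unsigned not_updated; } pid_data_t;

/* hypothetical helpers; the plugin uses per-module ARAL-backed allocators instead */
static publish_t *alloc_publish(void) { return calloc(1, sizeof(publish_t)); }
static void release_publish(publish_t *p) { free(p); }

static void update_pid(pid_data_t *pid, unsigned long ct, unsigned long hits, unsigned max_period)
{
    publish_t *p = pid->publish;
    if (!p)                                   /* first sample: allocate on demand */
        pid->publish = p = alloc_publish();

    if (!p->ct || p->ct != ct) {              /* kernel side produced new data */
        p->ct = ct;
        p->hits = hits;
        pid->not_updated = 0;
    } else if (++pid->not_updated >= max_period) {
        release_publish(p);                   /* stale: hand the memory back */
        pid->publish = NULL;
    }
}

int main(void)
{
    pid_data_t pd = {0};
    update_pid(&pd, 1, 10, 20);               /* allocates and stores the first sample */
    if (pd.publish)
        release_publish(pd.publish);
    return 0;
}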
@ -551,6 +551,10 @@ static void ebpf_fd_exit(void *pptr)
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
if(!em) return;

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_FD_IDX);
pthread_mutex_unlock(&lock);

if (ebpf_read_fd.thread)
nd_thread_signal_cancel(ebpf_read_fd.thread);

@ -656,12 +660,19 @@ static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core)
{
int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_fd_stat_t *total = &out[0];
uint64_t ct = total->ct;
for (i = 1; i < end; i++) {
netdata_fd_stat_t *w = &out[i];
total->open_call += w->open_call;
total->close_call += w->close_call;
total->open_err += w->open_err;
total->close_err += w->close_err;

if (w->ct > ct)
ct = w->ct;

if (!total->name[0] && w->name[0])
strncpyz(total->name, w->name, sizeof(total->name) - 1);
}
}

@ -671,8 +682,9 @@ static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core)
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
* @param max_period limit of iterations without updates before remove data from hash table
*/
static void ebpf_read_fd_apps_table(int maps_per_core, int max_period)
static void ebpf_read_fd_apps_table(int maps_per_core, uint32_t max_period)
{
netdata_fd_stat_t *fv = fd_vector;
int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;

@ -688,16 +700,23 @@ static void ebpf_read_fd_apps_table(int maps_per_core, int max_period)
fd_apps_accumulator(fv, maps_per_core);

ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(key, fv->tgid);
if (pid_stat) {
netdata_fd_stat_t *publish_fd = &pid_stat->fd;
if (!publish_fd->ct || publish_fd->ct != fv->ct) {
memcpy(publish_fd, &fv[0], sizeof(netdata_fd_stat_t));
pid_stat->not_updated = 0;
} else if (++pid_stat->not_updated >= max_period) {
bpf_map_delete_elem(fd, &key);
pid_stat->not_updated = 0;
}
ebpf_pid_data_t *pid_stat = ebpf_get_pid_data(key, fv->tgid, fv->name, EBPF_MODULE_FD_IDX);
netdata_publish_fd_stat_t *publish_fd = pid_stat->fd;
if (!publish_fd)
pid_stat->fd = publish_fd = ebpf_fd_allocate_publish();

if (!publish_fd->ct || publish_fd->ct != fv->ct) {
publish_fd->ct = fv->ct;
publish_fd->open_call = fv->open_call;
publish_fd->close_call = fv->close_call;
publish_fd->open_err = fv->open_err;
publish_fd->close_err = fv->close_err;

pid_stat->not_updated = 0;
} else if (++pid_stat->not_updated >= max_period) {
ebpf_release_pid_data(pid_stat, fd, key, EBPF_MODULE_FD_IDX);
ebpf_fd_release_publish(publish_fd);
pid_stat->fd = NULL;
}

end_fd_loop:

@ -719,18 +738,17 @@ static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct ebpf_pid_on_target *r
{
memset(fd, 0, sizeof(netdata_fd_stat_t));

while (root) {
for (; root; root = root->next) {
int32_t pid = root->pid;
ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
if (pid_stat) {
netdata_fd_stat_t *w = &pid_stat->fd;
fd->open_call += w->open_call;
fd->close_call += w->close_call;
fd->open_err += w->open_err;
fd->close_err += w->close_err;
}
ebpf_pid_data_t *pid_stat = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_FD_IDX);
netdata_publish_fd_stat_t *w = pid_stat->fd;
if (!w)
continue;

root = root->next;
fd->open_call += w->open_call;
fd->close_call += w->close_call;
fd->open_err += w->open_err;
fd->close_err += w->close_err;
}
}

@ -767,13 +785,16 @@ void *ebpf_read_fd_thread(void *ptr)
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
int collect_pid = (em->apps_charts || em->cgroup_charts);
if (!collect_pid)
return NULL;

int counter = update_every - 1;

uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
usec_t period = update_every * USEC_PER_SEC;
int max_period = update_every * EBPF_CLEANUP_FACTOR;
int period = USEC_PER_SEC;
uint32_t max_period = EBPF_CLEANUP_FACTOR;
while (!ebpf_plugin_stop() && running_time < lifetime) {
(void)heartbeat_next(&hb, period);
if (ebpf_plugin_stop() || ++counter != update_every)

@ -815,13 +836,12 @@ static void ebpf_update_fd_cgroup()
struct pid_on_target2 *pids;
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_fd_stat_t *out = &pids->fd;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_fd_stat_t *in = &local_pid->fd;

memcpy(out, in, sizeof(netdata_fd_stat_t));
}
netdata_publish_fd_stat_t *out = &pids->fd;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_FD_IDX);
netdata_publish_fd_stat_t *in = local_pid->fd;
if (!in)
continue;
memcpy(out, in, sizeof(netdata_publish_fd_stat_t));
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -872,13 +892,13 @@ void ebpf_fd_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
* @param fd structure used to store data
* @param pids input data
*/
static void ebpf_fd_sum_cgroup_pids(netdata_fd_stat_t *fd, struct pid_on_target2 *pids)
static void ebpf_fd_sum_cgroup_pids(netdata_publish_fd_stat_t *fd, struct pid_on_target2 *pids)
{
netdata_fd_stat_t accumulator;
memset(&accumulator, 0, sizeof(accumulator));

while (pids) {
netdata_fd_stat_t *w = &pids->fd;
netdata_publish_fd_stat_t *w = &pids->fd;

accumulator.open_err += w->open_err;
accumulator.open_call += w->open_call;

@ -995,7 +1015,7 @@ static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em)
* @param type chart type
* @param values structure with values that will be sent to netdata
*/
static void ebpf_send_specific_fd_data(char *type, netdata_fd_stat_t *values, ebpf_module_t *em)
static void ebpf_send_specific_fd_data(char *type, netdata_publish_fd_stat_t *values, ebpf_module_t *em)
{
ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "");
write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_call);

@ -1463,7 +1483,8 @@ void *ebpf_fd_thread(void *ptr)
pthread_mutex_unlock(&lock);

ebpf_read_fd.thread = nd_thread_create(ebpf_read_fd.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_read_fd_thread, em);
ebpf_read_fd.thread = nd_thread_create(ebpf_read_fd.name, NETDATA_THREAD_OPTION_DEFAULT,
ebpf_read_fd_thread, em);

fd_collector(em);
@ -40,6 +40,17 @@
// ARAL name
#define NETDATA_EBPF_FD_ARAL_NAME "ebpf_fd"

typedef struct __attribute__((packed)) netdata_publish_fd_stat {
uint64_t ct;

uint32_t open_call; // Open syscalls (open and openat)
uint32_t close_call; // Close syscall (close)

// Errors
uint32_t open_err;
uint32_t close_err;
} netdata_publish_fd_stat_t;

typedef struct netdata_fd_stat {
uint64_t ct;
uint32_t tgid;
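The fd_apps_accumulator() hunk earlier in this file folds the per-CPU copies returned for one key of a per-CPU hash map into slot zero: counters are summed, the newest `ct` timestamp wins, and the first non-empty task name is kept. A reduced, self-contained sketch of that folding step; the struct below is a simplified stand-in for `netdata_fd_stat_t`:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16                      /* assumed value */

typedef struct sample {
    uint64_t ct;
    uint32_t open_call, close_call;
    char name[TASK_COMM_LEN];
} sample_t;

/* fold the per-CPU copies of one key into out[0] */
static void accumulate(sample_t *out, int nprocs, int maps_per_core)
{
    int end = maps_per_core ? nprocs : 1;
    sample_t *total = &out[0];
    uint64_t ct = total->ct;

    for (int i = 1; i < end; i++) {
        sample_t *w = &out[i];
        total->open_call  += w->open_call;
        total->close_call += w->close_call;
        if (w->ct > ct)
            ct = w->ct;                       /* keep the most recent timestamp */
        if (!total->name[0] && w->name[0])
            strncpy(total->name, w->name, TASK_COMM_LEN - 1);
    }
    total->ct = ct;
}

int main(void)
{
    sample_t v[2] = { { .ct = 1, .open_call = 2 }, { .ct = 5, .open_call = 3, .name = "demo" } };
    accumulate(v, 2, 1);
    printf("open=%u ct=%llu name=%s\n", v[0].open_call, (unsigned long long)v[0].ct, v[0].name);
    return 0;
}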
@ -333,6 +333,46 @@ static inline int ebpf_fs_load_and_attach(ebpf_local_maps_t *map, struct filesys
*
*****************************************************************/

/**
* Obsolete Cleanup Struct
*
* Clean allocated data during obsolete steps
*
* @param efp
*/
static void ebpf_obsolete_cleanup_struct(ebpf_filesystem_partitions_t *efp) {
freez(efp->hread.name);
efp->hread.name = NULL;
freez(efp->hread.title);
efp->hread.title = NULL;
freez(efp->hread.ctx);
efp->hread.ctx = NULL;

freez(efp->hwrite.name);
efp->hwrite.name = NULL;
freez(efp->hwrite.title);
efp->hwrite.title = NULL;
freez(efp->hwrite.ctx);
efp->hwrite.ctx = NULL;

freez(efp->hopen.name);
efp->hopen.name = NULL;
freez(efp->hopen.title);
efp->hopen.title = NULL;
freez(efp->hopen.ctx);
efp->hopen.ctx = NULL;

freez(efp->hadditional.name);
efp->hadditional.name = NULL;
freez(efp->hadditional.title);
efp->hadditional.title = NULL;
freez(efp->hadditional.ctx);
efp->hadditional.ctx = NULL;

freez(efp->family_name);
efp->family_name = NULL;
}

/**
* Create Filesystem chart
*

@ -348,7 +388,7 @@ static void ebpf_obsolete_fs_charts(int update_every)
ebpf_filesystem_partitions_t *efp = &localfs[i];
uint32_t flags = efp->flags;
if ((flags & test) == test) {
flags &= ~NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
flags &= ~test;

ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
"",

@ -370,6 +410,8 @@ static void ebpf_obsolete_fs_charts(int update_every)
EBPF_COMMON_UNITS_CALLS_PER_SEC, efp->family_name,
NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hadditional.order,
update_every);

ebpf_obsolete_cleanup_struct(efp);
}
efp->flags = flags;
}

@ -395,9 +437,10 @@ static void ebpf_create_fs_charts(int update_every)
snprintfz(title, sizeof(title) - 1, "%s latency for each read request.", efp->filesystem);
snprintfz(family, sizeof(family) - 1, "%s_latency", efp->family);
snprintfz(chart_name, sizeof(chart_name) - 1, "%s_read_latency", efp->filesystem);
snprintfz(ctx, sizeof(ctx) - 1, "filesystem.read_latency");
efp->hread.name = strdupz(chart_name);
efp->hread.title = strdupz(title);
efp->hread.ctx = NULL;
efp->hread.ctx = strdupz(ctx);
efp->hread.order = order;
efp->family_name = strdupz(family);

@ -412,9 +455,10 @@ static void ebpf_create_fs_charts(int update_every)
snprintfz(title, sizeof(title) - 1, "%s latency for each write request.", efp->filesystem);
snprintfz(chart_name, sizeof(chart_name) - 1, "%s_write_latency", efp->filesystem);
snprintfz(ctx, sizeof(ctx) - 1, "filesystem.write_latency");
efp->hwrite.name = strdupz(chart_name);
efp->hwrite.title = strdupz(title);
efp->hwrite.ctx = NULL;
efp->hwrite.ctx = strdupz(ctx);
efp->hwrite.order = order;
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
efp->hwrite.title,

@ -427,9 +471,10 @@ static void ebpf_create_fs_charts(int update_every)
snprintfz(title, sizeof(title) - 1, "%s latency for each open request.", efp->filesystem);
snprintfz(chart_name, sizeof(chart_name) - 1, "%s_open_latency", efp->filesystem);
snprintfz(ctx, sizeof(ctx) - 1, "filesystem.open_latency");
efp->hopen.name = strdupz(chart_name);
efp->hopen.title = strdupz(title);
efp->hopen.ctx = NULL;
efp->hopen.ctx = strdupz(ctx);
efp->hopen.order = order;
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
efp->hopen.title,

@ -443,7 +488,7 @@ static void ebpf_create_fs_charts(int update_every)
char *type = (efp->flags & NETDATA_FILESYSTEM_ATTR_CHARTS) ? "attribute" : "sync";
snprintfz(title, sizeof(title) - 1, "%s latency for each %s request.", efp->filesystem, type);
snprintfz(chart_name, sizeof(chart_name) - 1, "%s_%s_latency", efp->filesystem, type);
snprintfz(ctx, sizeof(ctx) - 1, "filesystem.%s_latency", type);
snprintfz(ctx, sizeof(ctx) - 1, "filesystem.%s_latency", efp->filesystem);
efp->hadditional.name = strdupz(chart_name);
efp->hadditional.title = strdupz(title);
efp->hadditional.ctx = strdupz(ctx);

@ -575,7 +620,9 @@ static int ebpf_read_local_partitions()
ebpf_filesystem_partitions_t *w = &localfs[i];
if (w->enabled && (!strcmp(fs, w->filesystem) ||
(w->optional_filesystem && !strcmp(fs, w->optional_filesystem)))) {
localfs[i].flags |= NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
if (!(localfs[i].flags & NETDATA_FILESYSTEM_FLAG_CHART_CREATED))
localfs[i].flags |= NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;

localfs[i].flags &= ~NETDATA_FILESYSTEM_REMOVE_CHARTS;
count++;
break;

@ -892,10 +939,10 @@ static void read_filesystem_tables(int maps_per_core)
*/
void ebpf_filesystem_read_hash(ebpf_module_t *em)
{
ebpf_obsolete_fs_charts(em->update_every);

(void) ebpf_update_partitions(em);

ebpf_obsolete_fs_charts(em->update_every);

if (em->optional)
return;
@ -331,7 +331,7 @@ static void ebpf_function_socket_manipulation(const char *transaction,
"Filters can be combined. Each filter can be given only one time. Default all ports\n"
};

for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) {
for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) {
const char *keyword = get_word(words, num_words, i);
if (!keyword)
break;

@ -428,6 +428,7 @@ for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) {
ebpf_socket_clean_judy_array_unsafe();
rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);

collect_pids |= 1<<EBPF_MODULE_SOCKET_IDX;
pthread_mutex_lock(&ebpf_exit_cleanup);
if (ebpf_function_start_thread(em, period)) {
ebpf_function_error(transaction,
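The `collect_pids |= 1<<EBPF_MODULE_SOCKET_IDX;` line above is the set side of a bitmask that every module clears again in its exit handler with `collect_pids &= ~(1<<EBPF_MODULE_..._IDX);`, so the main thread can tell which modules still want per-PID data. A small sketch of that bookkeeping, with an illustrative (not the real) module-index enum:

#include <stdint.h>
#include <stdio.h>

enum { MODULE_PROCESS_IDX, MODULE_SOCKET_IDX, MODULE_CACHESTAT_IDX };  /* illustrative indexes */

static uint64_t collect_pids;                     /* one bit per module collecting per-PID data */

static void module_start(int idx) { collect_pids |=  (1ULL << idx); }
static void module_stop(int idx)  { collect_pids &= ~(1ULL << idx); }

int main(void)
{
    module_start(MODULE_SOCKET_IDX);
    module_start(MODULE_CACHESTAT_IDX);
    module_stop(MODULE_SOCKET_IDX);

    for (int i = 0; i < 3; i++)
        if (collect_pids & (1ULL << i))
            printf("module %d still needs PID data\n", i);
    return 0;
}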
@ -133,6 +133,10 @@ static void oomkill_cleanup(void *pptr)
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
if(!em) return;

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_OOMKILL_IDX);
pthread_mutex_unlock(&lock);

if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);

@ -549,7 +553,7 @@ void *ebpf_oomkill_thread(void *ptr)
em->maps = oomkill_maps;

#define NETDATA_DEFAULT_OOM_DISABLED_MSG "Disabling OOMKILL thread, because"
if (unlikely(!ebpf_all_pids || !em->apps_charts)) {
if (unlikely(!em->apps_charts)) {
// When we are not running integration with apps, we won't fill necessary variables for this thread to run, so
// we need to disable it.
pthread_mutex_lock(&ebpf_exit_cleanup);
@ -229,13 +229,13 @@ static void ebpf_update_process_cgroup()
struct pid_on_target2 *pids;
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
ebpf_process_stat_t *out = &pids->ps;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
ebpf_process_stat_t *in = &local_pid->process;
ebpf_publish_process_t *out = &pids->ps;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_PROCESS_IDX);
ebpf_publish_process_t *in = local_pid->process;
if (!in)
continue;

memcpy(out, in, sizeof(ebpf_process_stat_t));
}
memcpy(out, in, sizeof(ebpf_publish_process_t));
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -694,6 +694,10 @@ static void ebpf_process_exit(void *pptr)
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
if(!em) return;

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_PROCESS_IDX);
pthread_mutex_unlock(&lock);

if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {

@ -746,13 +750,13 @@ static void ebpf_process_exit(void *pptr)
* @param ps structure used to store data
* @param pids input data
*/
static void ebpf_process_sum_cgroup_pids(ebpf_process_stat_t *ps, struct pid_on_target2 *pids)
static void ebpf_process_sum_cgroup_pids(ebpf_publish_process_t *ps, struct pid_on_target2 *pids)
{
ebpf_process_stat_t accumulator;
ebpf_publish_process_t accumulator;
memset(&accumulator, 0, sizeof(accumulator));

while (pids) {
ebpf_process_stat_t *pps = &pids->ps;
ebpf_publish_process_t *pps = &pids->ps;

accumulator.exit_call += pps->exit_call;
accumulator.release_call += pps->release_call;

@ -781,7 +785,7 @@ static void ebpf_process_sum_cgroup_pids(ebpf_process_stat_t *ps, struct pid_on_
* @param values structure with values that will be sent to netdata
* @param em the structure with thread information
*/
static void ebpf_send_specific_process_data(char *type, ebpf_process_stat_t *values, ebpf_module_t *em)
static void ebpf_send_specific_process_data(char *type, ebpf_publish_process_t *values, ebpf_module_t *em)
{
ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "");
write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK].name,
@ -43,6 +43,7 @@
enum netdata_ebpf_stats_order {
NETDATA_EBPF_ORDER_STAT_THREADS = 140000,
NETDATA_EBPF_ORDER_PIDS,
NETDATA_EBPF_ORDER_STAT_LIFE_TIME,
NETDATA_EBPF_ORDER_STAT_LOAD_METHOD,
NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY,
@ -7,7 +7,7 @@ static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" };
static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END];
static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END];

netdata_publish_shm_t *shm_vector = NULL;
netdata_ebpf_shm_t *shm_vector = NULL;

static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
static netdata_idx_t *shm_values = NULL;

@ -453,6 +453,10 @@ static void ebpf_shm_exit(void *pptr)
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
if(!em) return;

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_SHM_IDX);
pthread_mutex_unlock(&lock);

if (ebpf_read_shm.thread)
nd_thread_signal_cancel(ebpf_read_shm.thread);

@ -506,16 +510,23 @@ static void ebpf_shm_exit(void *pptr)
* @param out the vector with read values.
* @param maps_per_core do I need to read all cores?
*/
static void shm_apps_accumulator(netdata_publish_shm_t *out, int maps_per_core)
static void shm_apps_accumulator(netdata_ebpf_shm_t *out, int maps_per_core)
{
int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_publish_shm_t *total = &out[0];
netdata_ebpf_shm_t *total = &out[0];
uint64_t ct = total->ct;
for (i = 1; i < end; i++) {
netdata_publish_shm_t *w = &out[i];
netdata_ebpf_shm_t *w = &out[i];
total->get += w->get;
total->at += w->at;
total->dt += w->dt;
total->ctl += w->ctl;

if (w->ct > ct)
ct = w->ct;

if (!total->name[0] && w->name[0])
strncpyz(total->name, w->name, sizeof(total->name) - 1);
}
}

@ -528,7 +539,7 @@ static void shm_apps_accumulator(netdata_publish_shm_t *out, int maps_per_core)
*/
static void ebpf_update_shm_cgroup()
{
netdata_publish_shm_t *cv = shm_vector;
netdata_ebpf_shm_t *cv = shm_vector;
size_t length = sizeof(netdata_publish_shm_t);

ebpf_cgroup_target_t *ect;

@ -541,12 +552,12 @@ static void ebpf_update_shm_cgroup()
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_shm_t *out = &pids->shm;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_shm_t *in = &local_pid->shm;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_SHM_IDX);
netdata_publish_shm_t *in = local_pid->shm;
if (!in)
continue;

memcpy(out, in, sizeof(netdata_publish_shm_t));
}
memcpy(out, in, sizeof(netdata_publish_shm_t));
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -558,12 +569,13 @@ static void ebpf_update_shm_cgroup()
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
* @param max_period limit of iterations without updates before remove data from hash table
*/
static void ebpf_read_shm_apps_table(int maps_per_core, int max_period)
static void ebpf_read_shm_apps_table(int maps_per_core, uint32_t max_period)
{
netdata_publish_shm_t *cv = shm_vector;
netdata_ebpf_shm_t *cv = shm_vector;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
size_t length = sizeof(netdata_publish_shm_t);
size_t length = sizeof(netdata_ebpf_shm_t);
if (maps_per_core)
length *= ebpf_nprocs;

@ -575,18 +587,18 @@ static void ebpf_read_shm_apps_table(int maps_per_core, int max_period)
shm_apps_accumulator(cv, maps_per_core);

ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, 0);
if (!local_pid)
goto end_shm_loop;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(key, cv->tgid, cv->name, EBPF_MODULE_SHM_IDX);
netdata_publish_shm_t *publish = local_pid->shm;
if (!publish)
local_pid->shm = publish = ebpf_shm_allocate_publish();

netdata_publish_shm_t *publish = &local_pid->shm;
if (!publish->ct || publish->ct != cv->ct) {
memcpy(publish, &cv[0], sizeof(netdata_publish_shm_t));
local_pid->not_updated = 0;
} else if (++local_pid->not_updated >= max_period){
bpf_map_delete_elem(fd, &key);
local_pid->not_updated = 0;
ebpf_release_pid_data(local_pid, fd, key, EBPF_MODULE_SHM_IDX);
ebpf_shm_release_publish(publish);
local_pid->shm = NULL;
}

end_shm_loop:

@ -654,23 +666,17 @@ static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core)
static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_target *root)
{
memset(shm, 0, sizeof(netdata_publish_shm_t));
while (root) {
for (; root; root = root->next) {
int32_t pid = root->pid;
ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
if (pid_stat) {
netdata_publish_shm_t *w = &pid_stat->shm;
shm->get += w->get;
shm->at += w->at;
shm->dt += w->dt;
shm->ctl += w->ctl;
ebpf_pid_data_t *pid_stat = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_SHM_IDX);
netdata_publish_shm_t *w = pid_stat->shm;
if (!w)
continue;

// reset for next collection.
w->get = 0;
w->at = 0;
w->dt = 0;
w->ctl = 0;
}
root = root->next;
shm->get += w->get;
shm->at += w->at;
shm->dt += w->dt;
shm->ctl += w->ctl;
}
}

@ -1060,13 +1066,16 @@ void *ebpf_read_shm_thread(void *ptr)
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
int collect_pid = (em->apps_charts || em->cgroup_charts);
if (!collect_pid)
return NULL;

int counter = update_every - 1;

uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
usec_t period = update_every * USEC_PER_SEC;
int max_period = update_every * EBPF_CLEANUP_FACTOR;
uint32_t max_period = EBPF_CLEANUP_FACTOR;
while (!ebpf_plugin_stop() && running_time < lifetime) {
(void)heartbeat_next(&hb, period);
if (ebpf_plugin_stop() || ++counter != update_every)

@ -1363,7 +1372,8 @@ void *ebpf_shm_thread(void *ptr)
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
pthread_mutex_unlock(&lock);

ebpf_read_shm.thread = nd_thread_create(ebpf_read_shm.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_read_shm_thread, em);
ebpf_read_shm.thread = nd_thread_create(ebpf_read_shm.name, NETDATA_THREAD_OPTION_DEFAULT,
ebpf_read_shm_thread, em);

shm_collector(em);
@ -28,15 +28,27 @@
#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "systemd.service.shmdt"
#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "systemd.service.shmctl"

typedef struct netdata_publish_shm {
typedef struct __attribute__((packed)) netdata_publish_shm {
uint64_t ct;

uint32_t get;
uint32_t at;
uint32_t dt;
uint32_t ctl;
} netdata_publish_shm_t;

typedef struct netdata_ebpf_shm {
uint64_t ct;
uint32_t tgid;
uint32_t uid;
uint32_t gid;
char name[TASK_COMM_LEN];

uint64_t get;
uint64_t at;
uint64_t dt;
uint64_t ctl;
} netdata_publish_shm_t;
uint32_t get;
uint32_t at;
uint32_t dt;
uint32_t ctl;
} netdata_ebpf_shm_t;

enum shm_tables {
NETDATA_PID_SHM_TABLE,
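In the reworked `ebpf_shm_sum_pids()` above, each PID's counters are added into the target total and then zeroed ("reset for next collection"), so an increment is only reported once per cycle. A compact sketch of that consume-and-reset loop with simplified types; `lookup_pid_counters()` is a toy stand-in for the plugin's per-PID lookup:

#include <stdio.h>

typedef struct shm_counters { unsigned get, at, dt, ctl; } shm_counters_t;
typedef struct pid_on_target { int pid; struct pid_on_target *next; } pid_on_target_t;

static shm_counters_t table[1024];            /* toy per-PID storage, indexed by pid */

static shm_counters_t *lookup_pid_counters(int pid)
{
    return (pid > 0 && pid < 1024) ? &table[pid] : NULL;
}

/* add each PID's counters into the target total, then zero them */
static void sum_pids(shm_counters_t *total, pid_on_target_t *root)
{
    *total = (shm_counters_t){0};
    for (; root; root = root->next) {
        shm_counters_t *w = lookup_pid_counters(root->pid);
        if (!w)
            continue;

        total->get += w->get;
        total->at  += w->at;
        total->dt  += w->dt;
        total->ctl += w->ctl;

        *w = (shm_counters_t){0};             /* next cycle only sees fresh increments */
    }
}

int main(void)
{
    table[42] = (shm_counters_t){ .get = 3, .at = 1 };
    pid_on_target_t p = { .pid = 42, .next = NULL };
    shm_counters_t total;
    sum_pids(&total, &p);
    printf("get=%u at=%u\n", total.get, total.at);
    return 0;
}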
@ -497,6 +497,10 @@ static void ebpf_socket_free(ebpf_module_t *em )
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
pthread_mutex_unlock(&ebpf_exit_cleanup);

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_SOCKET_IDX);
pthread_mutex_unlock(&lock);
}

/**

@ -1674,6 +1678,7 @@ static void ebpf_update_array_vectors(ebpf_module_t *em)
time_t update_time = time(NULL);
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
test = bpf_map_lookup_elem(fd, &key, values);
bool deleted = true;
if (test < 0) {
goto end_socket_loop;
}

@ -1683,7 +1688,6 @@ static void ebpf_update_array_vectors(ebpf_module_t *em)
}

ebpf_hash_socket_accumulator(values, end);
ebpf_socket_fill_publish_apps(key.pid, values);

// We update UDP to show info with charts, but we do not show them with functions
/*

@ -1727,14 +1731,17 @@ static void ebpf_update_array_vectors(ebpf_module_t *em)
}
uint64_t prev_period = socket_ptr->data.current_timestamp;
memcpy(&socket_ptr->data, &values[0], sizeof(netdata_socket_t));
if (translate)
if (translate) {
ebpf_socket_translate(socket_ptr, &key);
else { // Check socket was updated
deleted = false;
} else { // Check socket was updated
deleted = false;
if (prev_period) {
if (values[0].current_timestamp > prev_period) // Socket updated
socket_ptr->last_update = update_time;
else if ((update_time - socket_ptr->last_update) > em->update_every) {
// Socket was not updated since last read
deleted = true;
JudyLDel(&pid_ptr->socket_stats.JudyLArray, values[0].first_timestamp, PJE0);
aral_freez(aral_socket_table, socket_ptr);
}

@ -1745,7 +1752,19 @@ static void ebpf_update_array_vectors(ebpf_module_t *em)
rw_spinlock_write_unlock(&pid_ptr->socket_stats.rw_spinlock);
rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);

end_socket_loop:
end_socket_loop: ; // the empty statement is here to allow code to be compiled by old compilers
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(key.pid, 0, values[0].name, EBPF_MODULE_SOCKET_IDX);
ebpf_socket_publish_apps_t *curr = local_pid->socket;
if (!curr)
local_pid->socket = curr = ebpf_socket_allocate_publish();

if (!deleted)
ebpf_socket_fill_publish_apps(curr, values);
else {
ebpf_release_pid_data(local_pid, fd, key.pid, EBPF_MODULE_SOCKET_IDX);
ebpf_socket_release_publish(curr);
local_pid->socket = NULL;
}
memset(values, 0, length);
memcpy(&key, &next_key, sizeof(key));
}

@ -1765,23 +1784,22 @@ void ebpf_socket_resume_apps_data()
ebpf_socket_publish_apps_t *values = &w->socket;
memset(&w->socket, 0, sizeof(ebpf_socket_publish_apps_t));
while (move) {
for (; move; move = move->next) {
int32_t pid = move->pid;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
ebpf_socket_publish_apps_t *ws = &local_pid->socket;
values->call_tcp_v4_connection = ws->call_tcp_v4_connection;
values->call_tcp_v6_connection = ws->call_tcp_v6_connection;
values->bytes_sent = ws->bytes_sent;
values->bytes_received = ws->bytes_received;
values->call_tcp_sent = ws->call_tcp_sent;
values->call_tcp_received = ws->call_tcp_received;
values->retransmit = ws->retransmit;
values->call_udp_sent = ws->call_udp_sent;
values->call_udp_received = ws->call_udp_received;
}
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_SOCKET_IDX);
ebpf_socket_publish_apps_t *ws = local_pid->socket;
if (!ws)
continue;

move = move->next;
values->call_tcp_v4_connection = ws->call_tcp_v4_connection;
values->call_tcp_v6_connection = ws->call_tcp_v6_connection;
values->bytes_sent = ws->bytes_sent;
values->bytes_received = ws->bytes_received;
values->call_tcp_sent = ws->call_tcp_sent;
values->call_tcp_received = ws->call_tcp_received;
values->retransmit = ws->retransmit;
values->call_udp_sent = ws->call_udp_sent;
values->call_udp_received = ws->call_udp_received;
}
}
}

@ -1806,6 +1824,9 @@ void *ebpf_read_socket_thread(void *ptr)
int update_every = em->update_every;
int counter = update_every - 1;
int collect_pid = (em->apps_charts || em->cgroup_charts);
if (!collect_pid)
return NULL;

uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;

@ -1969,14 +1990,8 @@ static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_p
* @param current_pid the PID that I am updating
* @param ns the structure with data read from memory.
*/
void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns)
void ebpf_socket_fill_publish_apps(ebpf_socket_publish_apps_t *curr, netdata_socket_t *ns)
{
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(current_pid, 0);
if (!local_pid)
return;

ebpf_socket_publish_apps_t *curr = &local_pid->socket;

curr->bytes_sent = ns->tcp.tcp_bytes_sent;
curr->bytes_received = ns->tcp.tcp_bytes_received;
curr->call_tcp_sent = ns->tcp.call_tcp_sent;

@ -2005,21 +2020,21 @@ static void ebpf_update_socket_cgroup()
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
ebpf_socket_publish_apps_t *publish = &ect->publish_socket;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
ebpf_socket_publish_apps_t *in = &local_pid->socket;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_SOCKET_IDX);
ebpf_socket_publish_apps_t *in = local_pid->socket;
if (!in)
continue;

publish->bytes_sent = in->bytes_sent;
publish->bytes_received = in->bytes_received;
publish->call_tcp_sent = in->call_tcp_sent;
publish->call_tcp_received = in->call_tcp_received;
publish->retransmit = in->retransmit;
publish->call_udp_sent = in->call_udp_sent;
publish->call_udp_received = in->call_udp_received;
publish->call_close = in->call_close;
publish->call_tcp_v4_connection = in->call_tcp_v4_connection;
publish->call_tcp_v6_connection = in->call_tcp_v6_connection;
}
publish->bytes_sent = in->bytes_sent;
publish->bytes_received = in->bytes_received;
publish->call_tcp_sent = in->call_tcp_sent;
publish->call_tcp_received = in->call_tcp_received;
publish->retransmit = in->retransmit;
publish->call_udp_sent = in->call_udp_sent;
publish->call_udp_received = in->call_udp_received;
publish->call_close = in->call_close;
publish->call_tcp_v4_connection = in->call_tcp_v4_connection;
publish->call_tcp_v6_connection = in->call_tcp_v6_connection;
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);
@ -157,7 +157,7 @@ typedef enum ebpf_socket_idx {
#define NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME "ebpf_pid_socket"
#define NETDATA_EBPF_SOCKET_ARAL_TABLE_NAME "ebpf_socket_tbl"

typedef struct ebpf_socket_publish_apps {
typedef struct __attribute__((packed)) ebpf_socket_publish_apps {
// Data read
uint64_t bytes_sent; // Bytes sent
uint64_t bytes_received; // Bytes received

@ -342,8 +342,7 @@ void ebpf_parse_service_name_section(struct config *cfg);
void ebpf_parse_ips_unsafe(char *ptr);
void ebpf_parse_ports(char *ptr);
void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em);
void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns);

void ebpf_socket_fill_publish_apps(ebpf_socket_publish_apps_t *curr, netdata_socket_t *ns);

extern struct config socket_config;
extern netdata_ebpf_targets_t socket_targets[];
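With the prototype change above, `ebpf_socket_fill_publish_apps()` no longer resolves the PID itself; the caller fetches (or allocates) the per-PID publish block and passes it in together with the raw socket sample. A hedged sketch of the new call-site shape, using simplified stand-in types rather than the plugin's real structures:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* simplified stand-ins for the plugin's netdata_socket_t / publish structures */
typedef struct raw_socket { uint64_t bytes_sent, bytes_received; } raw_socket_t;
typedef struct socket_publish { uint64_t bytes_sent, bytes_received; } socket_publish_t;
typedef struct pid_data { socket_publish_t *socket; } pid_data_t;

/* the callee only copies values; it no longer looks the PID up itself */
static void fill_publish_apps(socket_publish_t *curr, const raw_socket_t *ns)
{
    curr->bytes_sent     = ns->bytes_sent;
    curr->bytes_received = ns->bytes_received;
}

static void on_sample(pid_data_t *pid, const raw_socket_t *ns)
{
    socket_publish_t *curr = pid->socket;
    if (!curr)                                /* allocate the publish block on first use */
        pid->socket = curr = calloc(1, sizeof(*curr));
    fill_publish_apps(curr, ns);
}

int main(void)
{
    pid_data_t pd = {0};
    raw_socket_t ns = { .bytes_sent = 128, .bytes_received = 256 };
    on_sample(&pd, &ns);
    printf("sent=%llu\n", (unsigned long long)pd.socket->bytes_sent);
    free(pd.socket);
    return 0;
}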
@ -10,7 +10,7 @@ static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END];
static netdata_idx_t swap_hash_values[NETDATA_SWAP_END];
static netdata_idx_t *swap_values = NULL;

netdata_publish_swap_t *swap_vector = NULL;
netdata_ebpf_swap_t *swap_vector = NULL;

struct config swap_config = { .first_section = NULL,
.last_section = NULL,

@ -393,6 +393,10 @@ static void ebpf_swap_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;

pthread_mutex_lock(&lock);
collect_pids &= ~(1<<EBPF_MODULE_SWAP_IDX);
pthread_mutex_unlock(&lock);

if (ebpf_read_swap.thread)
nd_thread_signal_cancel(ebpf_read_swap.thread);

@ -447,14 +451,21 @@ static void ebpf_swap_exit(void *ptr)
* @param out the vector with read values.
* @param maps_per_core do I need to read all cores?
*/
static void swap_apps_accumulator(netdata_publish_swap_t *out, int maps_per_core)
static void swap_apps_accumulator(netdata_ebpf_swap_t *out, int maps_per_core)
{
int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_publish_swap_t *total = &out[0];
netdata_ebpf_swap_t *total = &out[0];
uint64_t ct = total->ct;
for (i = 1; i < end; i++) {
netdata_publish_swap_t *w = &out[i];
netdata_ebpf_swap_t *w = &out[i];
total->write += w->write;
total->read += w->read;

if (w->ct > ct)
ct = w->ct;

if (!total->name[0] && w->name[0])
strncpyz(total->name, w->name, sizeof(total->name) - 1);
}
}

@ -472,12 +483,11 @@ static void ebpf_update_swap_cgroup()
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_swap_t *out = &pids->swap;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_swap_t *in = &local_pid->swap;

memcpy(out, in, sizeof(netdata_publish_swap_t));
}
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_SWAP_IDX);
netdata_publish_swap_t *in = local_pid->swap;
if (!in)
continue;
memcpy(out, in, sizeof(netdata_publish_swap_t));
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -496,15 +506,15 @@ static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_
uint64_t local_read = 0;
uint64_t local_write = 0;

while (root) {
for (; root; root = root->next) {
int32_t pid = root->pid;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_swap_t *w = &local_pid->swap;
local_write += w->write;
local_read += w->read;
}
root = root->next;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_SWAP_IDX);
netdata_publish_swap_t *w = local_pid->swap;
if (!w)
continue;

local_write += w->write;
local_read += w->read;
}

// These conditions were added, because we are using incremental algorithm

@ -532,12 +542,13 @@ void ebpf_swap_resume_apps_data() {
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
* @param max_period limit of iterations without updates before remove data from hash table
*/
static void ebpf_read_swap_apps_table(int maps_per_core, int max_period)
static void ebpf_read_swap_apps_table(int maps_per_core, uint32_t max_period)
{
netdata_publish_swap_t *cv = swap_vector;
netdata_ebpf_swap_t *cv = swap_vector;
int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
size_t length = sizeof(netdata_publish_swap_t);
size_t length = sizeof(netdata_ebpf_swap_t);
if (maps_per_core)
length *= ebpf_nprocs;

@ -549,17 +560,17 @@ static void ebpf_read_swap_apps_table(int maps_per_core, int max_period)
swap_apps_accumulator(cv, maps_per_core);

ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, cv->tgid);
if (!local_pid)
goto end_swap_loop;

netdata_publish_swap_t *publish = &local_pid->swap;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(key, cv->tgid, cv->name, EBPF_MODULE_SWAP_IDX);
netdata_publish_swap_t *publish = local_pid->swap;
if (!publish)
local_pid->swap = publish = ebpf_swap_allocate_publish_swap();
if (!publish->ct || publish->ct != cv->ct) {
memcpy(publish, cv, sizeof(netdata_publish_swap_t));
local_pid->not_updated = 0;
} else if (++local_pid->not_updated >= max_period) {
bpf_map_delete_elem(fd, &key);
local_pid->not_updated = 0;
} else if (++local_pid->not_updated >= max_period && !local_pid->has_proc_file) {
ebpf_release_pid_data(local_pid, fd, key, EBPF_MODULE_SWAP_IDX);
ebpf_release_publish_swap(publish);
local_pid->swap = NULL;
}

// We are cleaning to avoid passing data read from one process to other.

@ -587,13 +598,16 @@ void *ebpf_read_swap_thread(void *ptr)
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
int collect_pid = (em->apps_charts || em->cgroup_charts);
if (!collect_pid)
return NULL;

int counter = update_every - 1;

uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
usec_t period = update_every * USEC_PER_SEC;
int max_period = update_every * EBPF_CLEANUP_FACTOR;
uint32_t max_period = EBPF_CLEANUP_FACTOR;

while (!ebpf_plugin_stop() && running_time < lifetime) {
(void)heartbeat_next(&hb, period);

@ -1017,7 +1031,7 @@ void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_swap_allocate_global_vectors()
{
swap_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_swap_t));
swap_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_ebpf_swap_t));

swap_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
@ -24,16 +24,23 @@
#define NETDATA_SYSTEMD_SWAP_READ_CONTEXT "systemd.service.swap_read"
#define NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT "systemd.service.swap_write"

typedef struct netdata_publish_swap {
typedef struct __attribute__((packed)) netdata_publish_swap {
uint64_t ct;

uint32_t read;
uint32_t write;
} netdata_publish_swap_t;

typedef struct netdata_ebpf_swap {
uint64_t ct;
uint32_t tgid;
uint32_t uid;
uint32_t gid;
char name[TASK_COMM_LEN];

uint64_t read;
uint64_t write;
} netdata_publish_swap_t;
uint32_t read;
uint32_t write;
} netdata_ebpf_swap_t;

enum swap_tables {
NETDATA_PID_SWAP_TABLE,
@ -11,7 +11,7 @@ static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_re
|
|||
static netdata_idx_t *vfs_hash_values = NULL;
|
||||
static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END];
|
||||
static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END];
|
||||
netdata_publish_vfs_t *vfs_vector = NULL;
|
||||
netdata_ebpf_vfs_t *vfs_vector = NULL;
|
||||
|
||||
static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
|
||||
.user_input = 0, .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
|
||||
|
@ -881,6 +881,10 @@ static void ebpf_vfs_exit(void *pptr)
|
|||
ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
|
||||
if(!em) return;
|
||||
|
||||
pthread_mutex_lock(&lock);
|
||||
collect_pids &= ~(1<<EBPF_MODULE_VFS_IDX);
|
||||
pthread_mutex_unlock(&lock);
|
||||
|
||||
if (ebpf_read_vfs.thread)
|
||||
nd_thread_signal_cancel(ebpf_read_vfs.thread);
|
||||
|
||||
|
@ -1028,6 +1032,74 @@ static void ebpf_vfs_read_global_table(netdata_idx_t *stats, int maps_per_core)
(uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
}

/**
* Set VFS
*
* Set vfs structure with values from ebpf structure.
*
* @param vfs the output structure.
* @param w the input data.
*/
static inline void vfs_aggregate_set_vfs(netdata_publish_vfs_t *vfs, netdata_ebpf_vfs_t *w)
{
vfs->write_call = w->write_call;
vfs->writev_call = w->writev_call;
vfs->read_call = w->read_call;
vfs->readv_call = w->readv_call;
vfs->unlink_call = w->unlink_call;
vfs->fsync_call = w->fsync_call;
vfs->open_call = w->open_call;
vfs->create_call = w->create_call;

vfs->write_bytes = w->write_bytes;
vfs->writev_bytes = w->writev_bytes;
vfs->read_bytes = w->read_bytes;
vfs->readv_bytes = w->readv_bytes;

vfs->write_err = w->write_err;
vfs->writev_err = w->writev_err;
vfs->read_err = w->read_err;
vfs->readv_err = w->readv_err;
vfs->unlink_err = w->unlink_err;
vfs->fsync_err = w->fsync_err;
vfs->open_err = w->open_err;
vfs->create_err = w->create_err;
}

/**
* Aggregate Publish VFS
*
* Aggregate data from w source.
*
* @param vfs the output structure.
* @param w the input data.
*/
static inline void vfs_aggregate_publish_vfs(netdata_publish_vfs_t *vfs, netdata_publish_vfs_t *w)
{
vfs->write_call += w->write_call;
vfs->writev_call += w->writev_call;
vfs->read_call += w->read_call;
vfs->readv_call += w->readv_call;
vfs->unlink_call += w->unlink_call;
vfs->fsync_call += w->fsync_call;
vfs->open_call += w->open_call;
vfs->create_call += w->create_call;

vfs->write_bytes += w->write_bytes;
vfs->writev_bytes += w->writev_bytes;
vfs->read_bytes += w->read_bytes;
vfs->readv_bytes += w->readv_bytes;

vfs->write_err += w->write_err;
vfs->writev_err += w->writev_err;
vfs->read_err += w->read_err;
vfs->readv_err += w->readv_err;
vfs->unlink_err += w->unlink_err;
vfs->fsync_err += w->fsync_err;
vfs->open_err += w->open_err;
vfs->create_err += w->create_err;
}

/**
* Sum PIDs
*
@ -1038,63 +1110,17 @@ static void ebpf_vfs_read_global_table(netdata_idx_t *stats, int maps_per_core)
*/
static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct ebpf_pid_on_target *root)
{
netdata_publish_vfs_t accumulator;
memset(&accumulator, 0, sizeof(accumulator));
memset(vfs, 0, sizeof(netdata_publish_vfs_t));

while (root) {
for (; root; root = root->next) {
int32_t pid = root->pid;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_vfs_t *w = &local_pid->vfs;
accumulator.write_call += w->write_call;
accumulator.writev_call += w->writev_call;
accumulator.read_call += w->read_call;
accumulator.readv_call += w->readv_call;
accumulator.unlink_call += w->unlink_call;
accumulator.fsync_call += w->fsync_call;
accumulator.open_call += w->open_call;
accumulator.create_call += w->create_call;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_VFS_IDX);
netdata_publish_vfs_t *w = local_pid->vfs;
if (!w)
continue;

accumulator.write_bytes += w->write_bytes;
accumulator.writev_bytes += w->writev_bytes;
accumulator.read_bytes += w->read_bytes;
accumulator.readv_bytes += w->readv_bytes;

accumulator.write_err += w->write_err;
accumulator.writev_err += w->writev_err;
accumulator.read_err += w->read_err;
accumulator.readv_err += w->readv_err;
accumulator.unlink_err += w->unlink_err;
accumulator.fsync_err += w->fsync_err;
accumulator.open_err += w->open_err;
accumulator.create_err += w->create_err;
}
root = root->next;
vfs_aggregate_publish_vfs(vfs, w);
}

// These conditions were added, because we are using incremental algorithm
vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call;
vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call;
vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call;
vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call;
vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call;
vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call;
vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call;
vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call;

vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes;
vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes;
vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes;
vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes;

vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err;
vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err;
vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err;
vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err;
vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err;
vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err;
vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err;
vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err;
}

/**
@ -1183,12 +1209,13 @@ void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
*
* @param out the vector with read values.
*/
static void vfs_apps_accumulator(netdata_publish_vfs_t *out, int maps_per_core)
static void vfs_apps_accumulator(netdata_ebpf_vfs_t *out, int maps_per_core)
{
int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_publish_vfs_t *total = &out[0];
netdata_ebpf_vfs_t *total = &out[0];
uint64_t ct = total->ct;
for (i = 1; i < end; i++) {
netdata_publish_vfs_t *w = &out[i];
netdata_ebpf_vfs_t *w = &out[i];

total->write_call += w->write_call;
total->writev_call += w->writev_call;

@ -1206,17 +1233,23 @@ static void vfs_apps_accumulator(netdata_publish_vfs_t *out, int maps_per_core)
total->read_err += w->read_err;
total->readv_err += w->readv_err;
total->unlink_err += w->unlink_err;

if (w->ct > ct)
ct = w->ct;

if (!total->name[0] && w->name[0])
strncpyz(total->name, w->name, sizeof(total->name) - 1);
}
}

/**
* Read the hash table and store data to allocated vectors.
*/
static void ebpf_vfs_read_apps(int maps_per_core, int max_period)
static void ebpf_vfs_read_apps(int maps_per_core, uint32_t max_period)
{
netdata_publish_vfs_t *vv = vfs_vector;
netdata_ebpf_vfs_t *vv = vfs_vector;
int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
size_t length = sizeof(netdata_publish_vfs_t);
size_t length = sizeof(netdata_ebpf_vfs_t);
if (maps_per_core)
length *= ebpf_nprocs;
@ -1228,17 +1261,17 @@ static void ebpf_vfs_read_apps(int maps_per_core, int max_period)
vfs_apps_accumulator(vv, maps_per_core);

ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, vv->tgid);
if (!local_pid)
goto end_vfs_loop;

netdata_publish_vfs_t *publish = &local_pid->vfs;
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(key, vv->tgid, vv->name, EBPF_MODULE_VFS_IDX);
netdata_publish_vfs_t *publish = local_pid->vfs;
if (!publish)
local_pid->vfs = publish = ebpf_vfs_allocate_publish();
if (!publish->ct || publish->ct != vv->ct) {
memcpy(publish, vv, sizeof(netdata_publish_vfs_t));
vfs_aggregate_set_vfs(publish, vv);
local_pid->not_updated = 0;
} else if (++local_pid->not_updated >= max_period){
bpf_map_delete_elem(fd, &key);
local_pid->not_updated = 0;
ebpf_release_pid_data(local_pid, fd, key, EBPF_MODULE_VFS_IDX);
ebpf_vfs_release_publish(publish);
local_pid->vfs = NULL;
}

end_vfs_loop:
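The loop above only keeps a netdata_publish_vfs_t around while a PID is actually producing VFS events: the structure is allocated the first time the PID shows up in the kernel map and released again once the entry has gone max_period iterations without an update. A simplified sketch of that lifecycle follows (not the real implementation); calloc()/free() and the trimmed publish_t stand in for the plugin's ebpf_vfs_allocate_publish()/ebpf_vfs_release_publish() helpers and for netdata_publish_vfs_t.

/*
 * Simplified sketch: lazy allocation and expiry of the per-PID publish data.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct {            /* trimmed stand-in for netdata_publish_vfs_t */
    uint64_t ct;
    uint32_t read_call;
    uint32_t write_call;
} publish_t;

typedef struct {
    publish_t *vfs;         /* NULL until the PID shows VFS activity   */
    uint32_t not_updated;   /* iterations since the last kernel update */
} pid_entry_t;

static void update_pid(pid_entry_t *e, const publish_t *fresh, uint32_t max_period)
{
    if (!e->vfs)                                   /* allocate on first sight        */
        e->vfs = calloc(1, sizeof(publish_t));
    if (!e->vfs)
        return;                                    /* allocation failed, retry later */

    if (!e->vfs->ct || e->vfs->ct != fresh->ct) {  /* kernel data changed: refresh   */
        *e->vfs = *fresh;
        e->not_updated = 0;
    } else if (++e->not_updated >= max_period) {   /* idle too long: give memory back */
        free(e->vfs);
        e->vfs = NULL;
        e->not_updated = 0;
    }
}

int main(void)
{
    pid_entry_t e = { 0 };
    publish_t sample = { .ct = 1, .read_call = 10, .write_call = 2 };

    update_pid(&e, &sample, 10);   /* first sighting: allocates and copies */
    update_pid(&e, &sample, 10);   /* same ct: counts one idle iteration   */
    return 0;
}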
@ -1264,12 +1297,14 @@ static void read_update_vfs_cgroup()
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_vfs_t *out = &pids->vfs;
ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
if (local_pid) {
netdata_publish_vfs_t *in = &local_pid->vfs;
memset(out, 0, sizeof(netdata_publish_vfs_t));

memcpy(out, in, sizeof(netdata_publish_vfs_t));
}
ebpf_pid_data_t *local_pid = ebpf_get_pid_data(pid, 0, NULL, EBPF_MODULE_VFS_IDX);
netdata_publish_vfs_t *in = local_pid->vfs;
if (!in)
continue;

vfs_aggregate_publish_vfs(out, in);
}
}
pthread_mutex_unlock(&mutex_cgroup_shm);

@ -1284,7 +1319,7 @@ static void read_update_vfs_cgroup()
* @param pids input data
*/
static void ebpf_vfs_sum_cgroup_pids(netdata_publish_vfs_t *vfs, struct pid_on_target2 *pids)
{
{
netdata_publish_vfs_t accumulator;
memset(&accumulator, 0, sizeof(accumulator));
@ -2031,13 +2066,16 @@ void *ebpf_read_vfs_thread(void *ptr)
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
int collect_pid = (em->apps_charts || em->cgroup_charts);
if (!collect_pid)
return NULL;

int counter = update_every - 1;

uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
usec_t period = update_every * USEC_PER_SEC;
int max_period = update_every * EBPF_CLEANUP_FACTOR;
uint32_t max_period = EBPF_CLEANUP_FACTOR;
while (!ebpf_plugin_stop() && running_time < lifetime) {
(void)heartbeat_next(&hb, period);
if (ebpf_plugin_stop() || ++counter != update_every)

@ -2527,7 +2565,7 @@ void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_vfs_allocate_global_vectors()
{
vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_ebpf_vfs_t));

memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
@ -2618,7 +2656,8 @@ void *ebpf_vfs_thread(void *ptr)
pthread_mutex_unlock(&lock);

ebpf_read_vfs.thread = nd_thread_create(ebpf_read_vfs.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_read_vfs_thread, em);
ebpf_read_vfs.thread = nd_thread_create(ebpf_read_vfs.name, NETDATA_THREAD_OPTION_DEFAULT,
ebpf_read_vfs_thread, em);

vfs_collector(em);
@ -75,7 +75,38 @@
// dimension
#define EBPF_COMMON_UNITS_BYTES "bytes/s"

typedef struct netdata_publish_vfs {
typedef struct __attribute__((packed)) netdata_publish_vfs {
uint64_t ct;

//Counter
uint32_t write_call;
uint32_t writev_call;
uint32_t read_call;
uint32_t readv_call;
uint32_t unlink_call;
uint32_t fsync_call;
uint32_t open_call;
uint32_t create_call;

//Accumulator
uint64_t write_bytes;
uint64_t writev_bytes;
uint64_t readv_bytes;
uint64_t read_bytes;

//Counter
uint32_t write_err;
uint32_t writev_err;
uint32_t read_err;
uint32_t readv_err;
uint32_t unlink_err;
uint32_t fsync_err;
uint32_t open_err;
uint32_t create_err;

} netdata_publish_vfs_t;

typedef struct netdata_ebpf_vfs {
uint64_t ct;
uint32_t tgid;
uint32_t uid;

@ -107,7 +138,7 @@ typedef struct netdata_publish_vfs {
uint32_t fsync_err;
uint32_t open_err;
uint32_t create_err;
} netdata_publish_vfs_t;
} netdata_ebpf_vfs_t;

enum netdata_publish_vfs_list {
NETDATA_KEY_PUBLISH_VFS_UNLINK,
@ -3254,6 +3254,13 @@ modules:
dimensions:
- name: total
- name: running
- name: netdata.ebpf_pids
description: Total number of monitored PIDs
unit: "pids"
chart_type: line
dimensions:
- name: user
- name: kernel
- name: netdata.ebpf_load_methods
description: Load info
unit: "methods"