0
0
Fork 0
mirror of https://github.com/netdata/netdata.git synced 2025-04-14 01:29:11 +00:00

Address issues on EC2 (eBPF).

This commit is contained in:
thiagoftsm 2023-04-24 16:43:59 +00:00 committed by GitHub
parent 71d3142285
commit 24880f912a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
31 changed files with 799 additions and 706 deletions

View file

@ -4790,6 +4790,7 @@ static void cgroup_main_cleanup(void *ptr) {
}
if (shm_cgroup_ebpf.header) {
shm_cgroup_ebpf.header->cgroup_root_count = 0;
munmap(shm_cgroup_ebpf.header, shm_cgroup_ebpf.header->body_length);
}

View file

@ -28,11 +28,22 @@ int running_on_kernel = 0;
int ebpf_nprocs;
int isrh = 0;
int main_thread_id = 0;
int process_pid_fd = -1;
pthread_mutex_t lock;
pthread_mutex_t ebpf_exit_cleanup;
pthread_mutex_t collect_data_mutex;
pthread_cond_t collect_data_cond_var;
// Descriptor for the cgroup integration thread ("EBPF CGROUP INT").
// .thread and .start_routine are intentionally NULL here: main() allocates
// the thread handle and assigns ebpf_cgroup_integration before calling
// netdata_thread_create().
struct netdata_static_thread cgroup_integration_thread = {
    .name = "EBPF CGROUP INT",
    .config_section = NULL,
    .config_name = NULL,
    .env_name = NULL,
    .enabled = 1,
    .thread = NULL,
    .init_routine = NULL,
    .start_routine = NULL
};
ebpf_module_t ebpf_modules[] = {
{ .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
@ -451,6 +462,14 @@ ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .
#ifdef LIBBPF_MAJOR_VERSION
struct btf *default_btf = NULL;
struct cachestat_bpf *cachestat_bpf_obj = NULL;
struct dc_bpf *dc_bpf_obj = NULL;
struct fd_bpf *fd_bpf_obj = NULL;
struct mount_bpf *mount_bpf_obj = NULL;
struct shm_bpf *shm_bpf_obj = NULL;
struct socket_bpf *socket_bpf_obj = NULL;
struct swap_bpf *bpf_obj = NULL;
struct vfs_bpf *vfs_bpf_obj = NULL;
#else
void *default_btf = NULL;
#endif
@ -515,10 +534,12 @@ static void ebpf_exit()
#endif
printf("DISABLE\n");
pthread_mutex_lock(&mutex_cgroup_shm);
if (shm_ebpf_cgroup.header) {
munmap(shm_ebpf_cgroup.header, shm_ebpf_cgroup.header->body_length);
ebpf_unmap_cgroup_shared_memory();
shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
}
pthread_mutex_unlock(&mutex_cgroup_shm);
exit(0);
}
@ -545,6 +566,126 @@ static void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link
bpf_object__close(objects);
}
/**
* Unload Unique maps
*
* This function unloads all BPF maps from threads using one unique BPF object.
*/
static void ebpf_unload_unique_maps()
{
    int i;
    for (i = 0; ebpf_modules[i].thread_name; i++) {
        // Maps may only be unloaded for threads that reached the STOPPED
        // state. NOT_RUNNING threads never loaded anything (skip silently);
        // any other state means the thread is still alive and unloading
        // would race it, so report and skip.
        if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
            if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
                error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name);

            continue;
        }

        // Release the legacy (probe-based) object every thread may own.
        ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);

        // Threads built against libbpf skeletons additionally keep a
        // dedicated BPF object that must be destroyed explicitly
        // (only compiled in when LIBBPF_MAJOR_VERSION is defined).
        switch (i) {
            case EBPF_MODULE_CACHESTAT_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (cachestat_bpf_obj)
                    cachestat_bpf__destroy(cachestat_bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_DCSTAT_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (dc_bpf_obj)
                    dc_bpf__destroy(dc_bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_FD_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (fd_bpf_obj)
                    fd_bpf__destroy(fd_bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_MOUNT_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (mount_bpf_obj)
                    mount_bpf__destroy(mount_bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_SHM_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (shm_bpf_obj)
                    shm_bpf__destroy(shm_bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_SOCKET_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (socket_bpf_obj)
                    socket_bpf__destroy(socket_bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_SWAP_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (bpf_obj)
                    swap_bpf__destroy(bpf_obj);
#endif
                break;
            }
            case EBPF_MODULE_VFS_IDX: {
#ifdef LIBBPF_MAJOR_VERSION
                if (vfs_bpf_obj)
                    vfs_bpf__destroy(vfs_bpf_obj);
#endif
                break;
            }
            // Threads with no skeleton object: only the legacy unload above
            // applies to them.
            case EBPF_MODULE_PROCESS_IDX:
            case EBPF_MODULE_DISK_IDX:
            case EBPF_MODULE_HARDIRQ_IDX:
            case EBPF_MODULE_SOFTIRQ_IDX:
            case EBPF_MODULE_OOMKILL_IDX:
            case EBPF_MODULE_MDFLUSH_IDX:
            default:
                continue;
        }
    }
}
/**
* Unload filesystem maps
*
* This function unloads all BPF maps from the filesystem thread.
*/
static void ebpf_unload_filesystems()
{
    // Bail out when the filesystem thread never ran, or is still running:
    // unloading maps under a live thread would race it.
    // NOTE(review): the original second condition tested
    // EBPF_MODULE_SYNC_IDX, which looks like a copy-paste from
    // ebpf_unload_sync(); this function must check the filesystem thread's
    // own state, mirroring the structure of ebpf_unload_sync().
    if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
        ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
        return;

    // Every monitored filesystem carries its own legacy BPF object.
    int i;
    for (i = 0; localfs[i].filesystem != NULL; i++) {
        ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
    }
}
/**
* Unload sync maps
*
* This function unloads all BPF maps from the sync thread.
*/
static void ebpf_unload_sync()
{
    // Skip when the sync thread never started, or has not finished yet
    // (unloading while it runs would race the thread).
    int state = ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled;
    if (state == NETDATA_THREAD_EBPF_NOT_RUNNING || state == NETDATA_THREAD_EBPF_RUNNING)
        return;

    // Each traced syscall owns its own legacy BPF object; release them all.
    for (int idx = 0; local_syscalls[idx].syscall != NULL; idx++)
        ebpf_unload_legacy_code(local_syscalls[idx].objects, local_syscalls[idx].probe_links);
}
int ebpf_exit_plugin = 0;
/**
* Close the collector gracefully
@ -556,7 +697,6 @@ static void ebpf_stop_threads(int sig)
UNUSED(sig);
static int only_one = 0;
int i;
// Child thread should be closed by itself.
pthread_mutex_lock(&ebpf_exit_cleanup);
if (main_thread_id != gettid() || only_one) {
@ -564,13 +704,26 @@ static void ebpf_stop_threads(int sig)
return;
}
only_one = 1;
for (i = 0; ebpf_threads[i].name != NULL; i++) {
if (ebpf_threads[i].enabled != NETDATA_THREAD_EBPF_STOPPED)
netdata_thread_cancel(*ebpf_threads[i].thread);
int i;
for (i = 0; ebpf_modules[i].thread_name != NULL; i++) {
if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING) {
netdata_thread_cancel(*ebpf_modules[i].thread->thread);
#ifdef NETDATA_DEV_MODE
info("Sending cancel for thread %s", ebpf_modules[i].thread_name);
#endif
}
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
pthread_mutex_lock(&mutex_cgroup_shm);
netdata_thread_cancel(*cgroup_integration_thread.thread);
#ifdef NETDATA_DEV_MODE
info("Sending cancel for thread %s", cgroup_integration_thread.name);
#endif
pthread_mutex_unlock(&mutex_cgroup_shm);
ebpf_exit_plugin = 1;
usec_t max = USEC_PER_SEC, step = 100000;
while (i && max) {
max -= step;
@ -578,42 +731,18 @@ static void ebpf_stop_threads(int sig)
i = 0;
int j;
pthread_mutex_lock(&ebpf_exit_cleanup);
for (j = 0; ebpf_threads[j].name != NULL; j++) {
if (ebpf_threads[j].enabled != NETDATA_THREAD_EBPF_STOPPED)
for (j = 0; ebpf_modules[j].thread_name != NULL; j++) {
if (ebpf_modules[j].enabled == NETDATA_THREAD_EBPF_RUNNING)
i++;
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
if (!i) {
//Unload threads(except sync and filesystem)
pthread_mutex_lock(&ebpf_exit_cleanup);
for (i = 0; ebpf_threads[i].name != NULL; i++) {
if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX &&
i != EBPF_MODULE_SYNC_IDX)
ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
//Unload filesystem
pthread_mutex_lock(&ebpf_exit_cleanup);
if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
for (i = 0; localfs[i].filesystem != NULL; i++) {
ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
}
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
//Unload Sync
pthread_mutex_lock(&ebpf_exit_cleanup);
if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
for (i = 0; local_syscalls[i].syscall != NULL; i++) {
ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
}
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
pthread_mutex_lock(&ebpf_exit_cleanup);
ebpf_unload_unique_maps();
ebpf_unload_filesystems();
ebpf_unload_sync();
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_exit();
}
@ -624,6 +753,58 @@ static void ebpf_stop_threads(int sig)
*
*****************************************************************/
/**
* Create apps charts
*
* Call ebpf_create_chart to create the charts on apps submenu.
*
* @param root a pointer for the targets.
*/
static void ebpf_create_apps_charts(struct ebpf_target *root)
{
if (unlikely(!ebpf_all_pids))
return;
struct ebpf_target *w;
int newly_added = 0;
for (w = root; w; w = w->next) {
if (w->target)
continue;
if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
struct ebpf_pid_on_target *pid_on_target;
fprintf(
stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
(w->processes == 1) ? "" : "es");
for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
fprintf(stderr, " %d", pid_on_target->pid);
}
fputc('\n', stderr);
}
if (!w->exposed && w->processes) {
newly_added++;
w->exposed = 1;
if (debug_enabled || w->debug_enabled)
debug_log_int("%s just added - regenerating charts.", w->name);
}
}
if (!newly_added)
return;
int counter;
for (counter = 0; ebpf_modules[counter].thread_name; counter++) {
ebpf_module_t *current = &ebpf_modules[counter];
if (current->enabled == NETDATA_THREAD_EBPF_RUNNING && current->apps_charts && current->apps_routine)
current->apps_routine(current, root);
}
}
/**
* Get a value from a structure.
*
@ -1044,7 +1225,7 @@ void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *p
pio[i].dimension = dim[i];
pio[i].name = name[i];
pio[i].algorithm = strdupz(ebpf_algorithms[algorithm[i]]);
pio[i].algorithm = ebpf_algorithms[algorithm[i]];
if (publish_prev) {
publish_prev->next = &pio[i];
}
@ -1442,21 +1623,13 @@ static void read_local_addresses()
* Start Pthread Variable
*
* This function starts all pthread variables.
*
* @return It returns 0 on success and -1.
*/
int ebpf_start_pthread_variables()
void ebpf_start_pthread_variables()
{
pthread_mutex_init(&lock, NULL);
pthread_mutex_init(&ebpf_exit_cleanup, NULL);
pthread_mutex_init(&collect_data_mutex, NULL);
if (pthread_cond_init(&collect_data_cond_var, NULL)) {
error("Cannot start conditional variable to control Apps charts.");
return -1;
}
return 0;
pthread_mutex_init(&mutex_cgroup_shm, NULL);
}
/**
@ -2320,10 +2493,7 @@ int main(int argc, char **argv)
signal(SIGTERM, ebpf_stop_threads);
signal(SIGPIPE, ebpf_stop_threads);
if (ebpf_start_pthread_variables()) {
error("Cannot start mutex to control overall charts.");
ebpf_exit();
}
ebpf_start_pthread_variables();
netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
if(verify_netdata_host_prefix() == -1) ebpf_exit(6);
@ -2342,6 +2512,12 @@ int main(int argc, char **argv)
ebpf_set_static_routine();
cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
int i;
for (i = 0; ebpf_threads[i].name != NULL; i++) {
struct netdata_static_thread *st = &ebpf_threads[i];
@ -2352,30 +2528,37 @@ int main(int argc, char **argv)
if (em->enabled || !i) {
st->thread = mallocz(sizeof(netdata_thread_t));
em->thread_id = i;
st->enabled = NETDATA_THREAD_EBPF_RUNNING;
em->enabled = NETDATA_THREAD_EBPF_RUNNING;
netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
} else {
st->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_NOT_RUNNING;
}
}
usec_t step = USEC_PER_SEC;
int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
heartbeat_t hb;
heartbeat_init(&hb);
int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
int update_apps_list = update_apps_every - 1;
//Plugin will be killed when it receives a signal
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, step);
// We are using a small heartbeat time to wake up thread,
// but we should not update so frequently the shared memory data
if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
counter = 0;
if (!shm_ebpf_cgroup.header)
ebpf_map_cgroup_shared_memory();
pthread_mutex_lock(&ebpf_exit_cleanup);
if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING && process_pid_fd != -1) {
pthread_mutex_lock(&collect_data_mutex);
if (++update_apps_list == update_apps_every) {
update_apps_list = 0;
cleanup_exited_pids();
collect_data_for_all_processes(process_pid_fd);
ebpf_parse_cgroup_shm_data();
pthread_mutex_lock(&lock);
ebpf_create_apps_charts(apps_groups_root_target);
pthread_mutex_unlock(&lock);
}
pthread_mutex_unlock(&collect_data_mutex);
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
ebpf_stop_threads(0);

View file

@ -62,7 +62,7 @@
process = yes
shm = yes
socket = no
softirq = no
softirq = yes
sync = yes
swap = yes
vfs = no

View file

@ -36,6 +36,26 @@
#define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
#ifdef LIBBPF_MAJOR_VERSION // BTF code
#include "includes/cachestat.skel.h"
#include "includes/dc.skel.h"
#include "includes/fd.skel.h"
#include "includes/mount.skel.h"
#include "includes/shm.skel.h"
#include "includes/socket.skel.h"
#include "includes/swap.skel.h"
#include "includes/vfs.skel.h"
extern struct cachestat_bpf *cachestat_bpf_obj;
extern struct dc_bpf *dc_bpf_obj;
extern struct fd_bpf *fd_bpf_obj;
extern struct mount_bpf *mount_bpf_obj;
extern struct shm_bpf *shm_bpf_obj;
extern struct socket_bpf *socket_bpf_obj;
extern struct swap_bpf *bpf_obj;
extern struct vfs_bpf *vfs_bpf_obj;
#endif
typedef struct netdata_syscall_stat {
unsigned long bytes; // total number of bytes
uint64_t call; // total number of calls
@ -108,12 +128,6 @@ typedef struct ebpf_tracepoint {
char *event;
} ebpf_tracepoint_t;
enum ebpf_threads_status {
NETDATA_THREAD_EBPF_RUNNING,
NETDATA_THREAD_EBPF_STOPPING,
NETDATA_THREAD_EBPF_STOPPED
};
// Copied from musl header
#ifndef offsetof
#if __GNUC__ > 3
@ -178,9 +192,9 @@ extern int ebpf_nprocs;
extern int running_on_kernel;
extern int isrh;
extern char *ebpf_plugin_dir;
extern int process_pid_fd;
extern pthread_mutex_t collect_data_mutex;
extern pthread_cond_t collect_data_cond_var;
// Common functions
void ebpf_global_labels(netdata_syscall_stat_t *is,
@ -243,8 +257,6 @@ void ebpf_create_charts_on_apps(char *name,
void write_end_chart();
void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps);
int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp);
int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp);
uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps);

View file

@ -8,6 +8,23 @@
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_apps_pid_stat = NULL;
ARAL *ebpf_aral_process_stat = NULL;
ARAL *ebpf_aral_socket_pid = NULL;
ARAL *ebpf_aral_cachestat_pid = NULL;
ARAL *ebpf_aral_dcstat_pid = NULL;
ARAL *ebpf_aral_vfs_pid = NULL;
ARAL *ebpf_aral_fd_pid = NULL;
ARAL *ebpf_aral_shm_pid = NULL;
// ----------------------------------------------------------------------------
// Global vectors used with apps
ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
netdata_publish_cachestat_t **cachestat_pid = NULL;
netdata_publish_dcstat_t **dcstat_pid = NULL;
netdata_publish_swap_t **swap_pid = NULL;
netdata_publish_vfs_t **vfs_pid = NULL;
netdata_fd_stat_t **fd_pid = NULL;
netdata_publish_shm_t **shm_pid = NULL;
ebpf_process_stat_t **global_process_stats = NULL;
/**
* eBPF ARAL Init
@ -55,6 +72,12 @@ void ebpf_pid_stat_release(struct ebpf_pid_stat *stat)
aral_freez(ebpf_aral_apps_pid_stat, stat);
}
/*****************************************************************
*
* PROCESS ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF process stat get
*
@ -79,6 +102,246 @@ void ebpf_process_stat_release(ebpf_process_stat_t *stat)
aral_freez(ebpf_aral_process_stat, stat);
}
/*****************************************************************
*
* SOCKET ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF socket Aral init
*
* Initialize array allocator that will be used when integration with apps is enabled.
*/
void ebpf_socket_aral_init()
{
    // Create the ARAL pool used for per-PID socket entries.
    ebpf_aral_socket_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_NAME, sizeof(ebpf_socket_publish_apps_t));
}
/**
* eBPF socket get
*
* Get a ebpf_socket_publish_apps_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void)
{
    // Take one entry from the socket ARAL pool and hand it back zeroed.
    ebpf_socket_publish_apps_t *entry = aral_mallocz(ebpf_aral_socket_pid);
    memset(entry, 0, sizeof *entry);
    return entry;
}
/**
* eBPF socket release
*
* @param stat Release a target after usage.
*/
void ebpf_socket_release(ebpf_socket_publish_apps_t *stat)
{
    // Return the entry to the socket ARAL pool.
    aral_freez(ebpf_aral_socket_pid, stat);
}
/*****************************************************************
*
* CACHESTAT ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF Cachestat Aral init
*
* Initialize array allocator that will be used when integration with apps is enabled.
*/
void ebpf_cachestat_aral_init()
{
    // Create the ARAL pool used for per-PID cachestat entries.
    ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
}
/**
* eBPF publish cachestat get
*
* Get a netdata_publish_cachestat_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
{
    // Take one entry from the cachestat ARAL pool and hand it back zeroed.
    netdata_publish_cachestat_t *entry = aral_mallocz(ebpf_aral_cachestat_pid);
    memset(entry, 0, sizeof *entry);
    return entry;
}
/**
* eBPF cachestat release
*
* @param stat Release a target after usage.
*/
void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
{
    // Return the entry to the cachestat ARAL pool.
    aral_freez(ebpf_aral_cachestat_pid, stat);
}
/*****************************************************************
*
* DCSTAT ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF directory cache Aral init
*
* Initialize array allocator that will be used when integration with apps is enabled.
*/
void ebpf_dcstat_aral_init()
{
    // Create the ARAL pool used for per-PID directory-cache entries.
    ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
}
/**
* eBPF publish dcstat get
*
* Get a netdata_publish_dcstat_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
{
    // Take one entry from the dcstat ARAL pool and hand it back zeroed.
    netdata_publish_dcstat_t *entry = aral_mallocz(ebpf_aral_dcstat_pid);
    memset(entry, 0, sizeof *entry);
    return entry;
}
/**
* eBPF dcstat release
*
* @param stat Release a target after usage.
*/
void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
{
    // Return the entry to the dcstat ARAL pool.
    aral_freez(ebpf_aral_dcstat_pid, stat);
}
/*****************************************************************
*
* VFS ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF VFS Aral init
*
* Initialize array allocator that will be used when integration with apps is enabled.
*/
void ebpf_vfs_aral_init()
{
    // Create the ARAL pool used for per-PID VFS entries.
    ebpf_aral_vfs_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_VFS_ARAL_NAME, sizeof(netdata_publish_vfs_t));
}
/**
* eBPF publish VFS get
*
* Get a netdata_publish_vfs_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_vfs_t *ebpf_vfs_get(void)
{
    // Take one entry from the VFS ARAL pool and hand it back zeroed.
    netdata_publish_vfs_t *entry = aral_mallocz(ebpf_aral_vfs_pid);
    memset(entry, 0, sizeof *entry);
    return entry;
}
/**
* eBPF VFS release
*
* @param stat Release a target after usage.
*/
void ebpf_vfs_release(netdata_publish_vfs_t *stat)
{
    // Return the entry to the VFS ARAL pool.
    aral_freez(ebpf_aral_vfs_pid, stat);
}
/*****************************************************************
*
* FD ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF file descriptor Aral init
*
* Initialize array allocator that will be used when integration with apps is enabled.
*/
void ebpf_fd_aral_init()
{
    // Create the ARAL pool used for per-PID file-descriptor entries.
    ebpf_aral_fd_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_FD_ARAL_NAME, sizeof(netdata_fd_stat_t));
}
/**
* eBPF publish file descriptor get
*
* Get a netdata_fd_stat_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_fd_stat_t *ebpf_fd_stat_get(void)
{
    // Take one entry from the file-descriptor ARAL pool and hand it back zeroed.
    netdata_fd_stat_t *entry = aral_mallocz(ebpf_aral_fd_pid);
    memset(entry, 0, sizeof *entry);
    return entry;
}
/**
* eBPF file descriptor release
*
* @param stat Release a target after usage.
*/
void ebpf_fd_release(netdata_fd_stat_t *stat)
{
    // Return the entry to the file-descriptor ARAL pool.
    aral_freez(ebpf_aral_fd_pid, stat);
}
/*****************************************************************
*
* SHM ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF shared memory Aral init
*
* Initialize array allocator that will be used when integration with apps is enabled.
*/
void ebpf_shm_aral_init()
{
    // Create the ARAL pool used for per-PID shared-memory entries.
    ebpf_aral_shm_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SHM_ARAL_NAME, sizeof(netdata_publish_shm_t));
}
/**
* eBPF shared memory get
*
* Get a netdata_publish_shm_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_shm_t *ebpf_shm_stat_get(void)
{
    // Take one entry from the shared-memory ARAL pool and hand it back zeroed.
    netdata_publish_shm_t *entry = aral_mallocz(ebpf_aral_shm_pid);
    memset(entry, 0, sizeof *entry);
    return entry;
}
/**
* eBPF shared memory release
*
* @param stat Release a target after usage.
*/
void ebpf_shm_release(netdata_publish_shm_t *stat)
{
    // Return the entry to the shared-memory ARAL pool.
    aral_freez(ebpf_aral_shm_pid, stat);
}
// ----------------------------------------------------------------------------
// internal flags
// handled in code (automatically set)

View file

@ -218,6 +218,10 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd);
extern ebpf_process_stat_t **global_process_stats;
extern netdata_publish_cachestat_t **cachestat_pid;
extern netdata_publish_dcstat_t **dcstat_pid;
extern netdata_publish_swap_t **swap_pid;
extern netdata_publish_vfs_t **vfs_pid;
extern netdata_fd_stat_t **fd_pid;
extern netdata_publish_shm_t **shm_pid;
// The default value is at least 32 times smaller than maximum number of PIDs allowed on system,
// this is only possible because we are using ARAL (https://github.com/netdata/netdata/tree/master/libnetdata/aral).
@ -226,11 +230,48 @@ extern netdata_publish_dcstat_t **dcstat_pid;
#endif
#define NETDATA_EBPF_ALLOC_MIN_ELEMENTS 256
// ARAL Sectiion
extern void ebpf_aral_init(void);
extern ebpf_process_stat_t *ebpf_process_stat_get(void);
extern void ebpf_process_stat_release(ebpf_process_stat_t *stat);
extern ARAL *ebpf_aral_socket_pid;
void ebpf_socket_aral_init();
ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void);
void ebpf_socket_release(ebpf_socket_publish_apps_t *stat);
extern ARAL *ebpf_aral_cachestat_pid;
void ebpf_cachestat_aral_init();
netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void);
void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
extern ARAL *ebpf_aral_dcstat_pid;
void ebpf_dcstat_aral_init();
netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void);
void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
extern ARAL *ebpf_aral_vfs_pid;
void ebpf_vfs_aral_init();
netdata_publish_vfs_t *ebpf_vfs_get(void);
void ebpf_vfs_release(netdata_publish_vfs_t *stat);
extern ARAL *ebpf_aral_fd_pid;
void ebpf_fd_aral_init();
netdata_fd_stat_t *ebpf_fd_stat_get(void);
void ebpf_fd_release(netdata_fd_stat_t *stat);
extern ARAL *ebpf_aral_shm_pid;
void ebpf_shm_aral_init();
netdata_publish_shm_t *ebpf_shm_stat_get(void);
void ebpf_shm_release(netdata_publish_shm_t *stat);
// ARAL Section end
// Threads integrated with apps
extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
// Threads integrated with apps
#include "libnetdata/threads/threads.h"
// ARAL variables

View file

@ -3,12 +3,6 @@
#include "ebpf.h"
#include "ebpf_cachestat.h"
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_cachestat_pid = NULL;
netdata_publish_cachestat_t **cachestat_pid;
static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit",
"miss" };
static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END];
@ -50,10 +44,6 @@ static char *account_page[NETDATA_CACHESTAT_ACCOUNT_DIRTY_END] ={ "account_page_
"__set_page_dirty", "__folio_mark_dirty" };
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/cachestat.skel.h" // BTF code
static struct cachestat_bpf *bpf_obj = NULL;
/**
* Disable probe
*
@ -337,20 +327,14 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
static void ebpf_cachestat_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);
freez(cachestat_vector);
freez(cachestat_values);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
cachestat_bpf__destroy(bpf_obj);
#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -368,46 +352,6 @@ static void ebpf_cachestat_exit(void *ptr)
ebpf_cachestat_free(em);
}
/*****************************************************************
*
* ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF Cachestat Aral init
*
* Initiallize array allocator that will be used when integration with apps is enabled.
*/
static inline void ebpf_cachestat_aral_init()
{
ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
}
/**
* eBPF publish cachestat get
*
* Get a netdata_publish_cachestat_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
{
netdata_publish_cachestat_t *target = aral_mallocz(ebpf_aral_cachestat_pid);
memset(target, 0, sizeof(netdata_publish_cachestat_t));
return target;
}
/**
* eBPF cachestat release
*
* @param stat Release a target after usage.
*/
void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
{
aral_freez(ebpf_aral_cachestat_pid, stat);
}
/*****************************************************************
*
* COMMON FUNCTIONS
@ -1282,11 +1226,11 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = cachestat_bpf__open();
if (!bpf_obj)
cachestat_bpf_obj = cachestat_bpf__open();
if (!cachestat_bpf_obj)
ret = -1;
else
ret = ebpf_cachestat_load_and_attach(bpf_obj, em);
ret = ebpf_cachestat_load_and_attach(cachestat_bpf_obj, em);
}
#endif
@ -1315,7 +1259,6 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);
if (ebpf_cachestat_set_internal_value()) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endcachestat;
}
@ -1323,7 +1266,6 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_cachestat_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endcachestat;
}

View file

@ -6,6 +6,7 @@
#include "ebpf_cgroup.h"
ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
static void *ebpf_mapped_memory = NULL;
int send_cgroup_chart = 0;
// --------------------------------------------------------------------------------------------------------------------
@ -19,7 +20,7 @@ int send_cgroup_chart = 0;
* @param fd file descriptor returned after shm_open was called.
* @param length length of the shared memory
*
* @return It returns a pointer to the region mapped.
* @return It returns a pointer to the region mapped on success and MAP_FAILED otherwise.
*/
static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
{
@ -36,6 +37,16 @@ static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
return value;
}
/**
* Unmap Shared Memory
*
* Unmap shared memory used to integrate eBPF and cgroup plugin
*/
void ebpf_unmap_cgroup_shared_memory()
{
    // The unmap length is read through the mapped header itself, so calling
    // this before a successful ebpf_map_cgroup_shared_memory() would
    // dereference a NULL pointer (UB). Guard both pointers; the caller in
    // ebpf_exit() already checks the header, but this keeps the function
    // safe on every path.
    if (!ebpf_mapped_memory || !shm_ebpf_cgroup.header)
        return;

    munmap(ebpf_mapped_memory, shm_ebpf_cgroup.header->body_length);
}
/**
* Map cgroup shared memory
*
@ -56,40 +67,47 @@ void ebpf_map_cgroup_shared_memory()
limit_try++;
next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;
shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
if (shm_fd_ebpf_cgroup < 0) {
if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
error("Shared memory was not initialized, integration between processes won't happen.");
shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
if (shm_fd_ebpf_cgroup < 0) {
if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
error("Shared memory was not initialized, integration between processes won't happen.");
return;
return;
}
}
// Map only header
shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
sizeof(netdata_ebpf_cgroup_shm_header_t));
if (!shm_ebpf_cgroup.header) {
limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
void *mapped = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
sizeof(netdata_ebpf_cgroup_shm_header_t));
if (unlikely(mapped == SEM_FAILED)) {
return;
}
netdata_ebpf_cgroup_shm_header_t *header = mapped;
size_t length = header->body_length;
munmap(header, sizeof(netdata_ebpf_cgroup_shm_header_t));
if (length <= ((sizeof(netdata_ebpf_cgroup_shm_header_t) + sizeof(netdata_ebpf_cgroup_shm_body_t)))) {
return;
}
size_t length = shm_ebpf_cgroup.header->body_length;
munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t));
shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
if (!shm_ebpf_cgroup.header) {
limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
ebpf_mapped_memory = (void *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
if (unlikely(ebpf_mapped_memory == MAP_FAILED)) {
return;
}
shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header +
sizeof(netdata_ebpf_cgroup_shm_header_t));
shm_ebpf_cgroup.header = ebpf_mapped_memory;
shm_ebpf_cgroup.body = ebpf_mapped_memory + sizeof(netdata_ebpf_cgroup_shm_header_t);
shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);
if (shm_sem_ebpf_cgroup == SEM_FAILED) {
error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
munmap(shm_ebpf_cgroup.header, length);
limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
munmap(ebpf_mapped_memory, length);
shm_ebpf_cgroup.header = NULL;
shm_ebpf_cgroup.body = NULL;
close(shm_fd_ebpf_cgroup);
shm_fd_ebpf_cgroup = -1;
shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
@ -258,32 +276,38 @@ void ebpf_reset_updated_var()
/**
 * Parse cgroup shared memory
 *
 * Copy the cgroup table published through the shared memory segment into the
 * plugin's local list, creating or updating one target per enabled cgroup.
 *
 * NOTE(review): this span previously contained both the pre- and post-refactor
 * bodies merged together (two sem_wait paths); this is the single, guarded pass.
 */
void ebpf_parse_cgroup_shm_data()
{
    static int previous = 0;
    // Nothing to do while the shared memory segment or its semaphore is absent.
    if (!shm_ebpf_cgroup.header || shm_sem_ebpf_cgroup == SEM_FAILED)
        return;

    sem_wait(shm_sem_ebpf_cgroup);
    int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
    if (end <= 0) {
        // Empty table: release the semaphore before bailing out.
        sem_post(shm_sem_ebpf_cgroup);
        return;
    }

    pthread_mutex_lock(&mutex_cgroup_shm);
    ebpf_remove_cgroup_target_update_list();

    ebpf_reset_updated_var();

    for (i = 0; i < end; i++) {
        netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
        if (ptr->enabled) {
            ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
            ebpf_update_pid_link_list(ect, ptr->path);
        }
    }

    // Charts are (re)sent only when the number of cgroups changed since last pass.
    send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
    previous = shm_ebpf_cgroup.header->cgroup_root_count;
    pthread_mutex_unlock(&mutex_cgroup_shm);
#ifdef NETDATA_DEV_MODE
    info("Updating cgroup %d (Previous: %d, Current: %d)",
         send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
#endif

    sem_post(shm_sem_ebpf_cgroup);
}
// --------------------------------------------------------------------------------------------------------------------
@ -315,3 +339,54 @@ void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *fam
fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
}
}
// --------------------------------------------------------------------------------------------------------------------
// Cgroup main thread
/**
 * CGROUP exit
 *
 * Cleanup callback pushed by the cgroup integration thread; there is nothing
 * to release, so the argument is only silenced.
 *
 * @param ptr thread data.
 */
static void ebpf_cgroup_exit(void *ptr)
{
    (void)ptr;
}
/**
 * Cgroup integration
 *
 * Thread responsible to call functions responsible to sync data between plugins.
 *
 * @param ptr It is a NULL value for this thread.
 *
 * @return It always returns NULL.
 */
void *ebpf_cgroup_integration(void *ptr)
{
    netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr);

    heartbeat_t hb;
    heartbeat_init(&hb);
    int tick = NETDATA_EBPF_CGROUP_UPDATE - 1;

    // The plugin is stopped by a signal; until then keep the shared memory in sync.
    while (!ebpf_exit_plugin) {
        (void)heartbeat_next(&hb, USEC_PER_SEC);

        // Wake up every second to notice shutdown quickly, but only touch the
        // shared memory once every NETDATA_EBPF_CGROUP_UPDATE beats.
        if (++tick < NETDATA_EBPF_CGROUP_UPDATE)
            continue;

        tick = 0;
        if (shm_ebpf_cgroup.header)
            ebpf_parse_cgroup_shm_data();
        else
            ebpf_map_cgroup_shared_memory();
    }

    netdata_thread_cleanup_pop(1);
    return NULL;
}

View file

@ -64,6 +64,8 @@ void ebpf_map_cgroup_shared_memory();
void ebpf_parse_cgroup_shm_data();
void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
char *algorithm, char *context, char *module, int update_every);
void *ebpf_cgroup_integration(void *ptr);
void ebpf_unmap_cgroup_shared_memory();
extern int send_cgroup_chart;
#endif /* NETDATA_EBPF_CGROUP_H */

View file

@ -3,16 +3,11 @@
#include "ebpf.h"
#include "ebpf_dcstat.h"
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_dcstat_pid = NULL;
static char *dcstat_counter_dimension_name[NETDATA_DCSTAT_IDX_END] = { "ratio", "reference", "slow", "miss" };
static netdata_syscall_stat_t dcstat_counter_aggregated_data[NETDATA_DCSTAT_IDX_END];
static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_END];
netdata_dcstat_pid_t *dcstat_vector = NULL;
netdata_publish_dcstat_t **dcstat_pid = NULL;
static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
static netdata_idx_t *dcstat_values = NULL;
@ -49,10 +44,6 @@ netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOA
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/dc.skel.h" // BTF code
static struct dc_bpf *bpf_obj = NULL;
/**
* Disable probe
*
@ -298,23 +289,16 @@ void ebpf_dcstat_clean_names()
/**
 * Dcstat free
 *
 * Release vectors allocated by the directory-cache thread and mark the module
 * as stopped via em->enabled (the em->thread->enabled field was removed).
 * The BTF object is now the global dc_bpf_obj destroyed centrally at plugin
 * shutdown, so the stale local bpf_obj destroy was dropped; algorithm strings
 * are no longer strdup'd, so freeing them here would free static storage.
 *
 * @param em the module structure.
 */
static void ebpf_dcstat_free(ebpf_module_t *em )
{
    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
    pthread_mutex_unlock(&ebpf_exit_cleanup);

    freez(dcstat_vector);
    freez(dcstat_values);

    ebpf_dcstat_clean_names();

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -331,46 +315,6 @@ static void ebpf_dcstat_exit(void *ptr)
ebpf_dcstat_free(em);
}
/*****************************************************************
*
* ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF directory cache Aral init
*
* Initiallize array allocator that will be used when integration with apps is enabled.
*/
static inline void ebpf_dcstat_aral_init()
{
ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
}
/**
* eBPF publish dcstat get
*
* Get a netdata_publish_dcstat_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
{
netdata_publish_dcstat_t *target = aral_mallocz(ebpf_aral_dcstat_pid);
memset(target, 0, sizeof(netdata_publish_dcstat_t));
return target;
}
/**
* eBPF dcstat release
*
* @param stat Release a target after usage.
*/
void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
{
aral_freez(ebpf_aral_dcstat_pid, stat);
}
/*****************************************************************
*
* APPS
@ -1150,11 +1094,11 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = dc_bpf__open();
if (!bpf_obj)
dc_bpf_obj = dc_bpf__open();
if (!dc_bpf_obj)
ret = -1;
else
ret = ebpf_dc_load_and_attach(bpf_obj, em);
ret = ebpf_dc_load_and_attach(dc_bpf_obj, em);
}
#endif
@ -1188,7 +1132,6 @@ void *ebpf_dcstat_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_dcstat_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddcstat;
}

View file

@ -429,7 +429,7 @@ static void ebpf_cleanup_disk_list()
static void ebpf_disk_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_disk_disable_tracepoints();
@ -444,7 +444,7 @@ static void ebpf_disk_free(ebpf_module_t *em)
ebpf_cleanup_disk_list();
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -761,25 +761,21 @@ void *ebpf_disk_thread(void *ptr)
em->maps = disk_maps;
if (ebpf_disk_enable_tracepoints()) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddisk;
}
avl_init_lock(&disk_tree, ebpf_compare_disks);
if (read_local_disks()) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddisk;
}
if (pthread_mutex_init(&plot_mutex, NULL)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
error("Cannot initialize local mutex");
goto enddisk;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddisk;
}

View file

@ -3,10 +3,6 @@
#include "ebpf.h"
#include "ebpf_fd.h"
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_fd_pid = NULL;
static char *fd_dimension_names[NETDATA_FD_SYSCALL_END] = { "open", "close" };
static char *fd_id_names[NETDATA_FD_SYSCALL_END] = { "do_sys_open", "__close_fd" };
@ -40,17 +36,12 @@ static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
static netdata_idx_t *fd_values = NULL;
netdata_fd_stat_t *fd_vector = NULL;
netdata_fd_stat_t **fd_pid = NULL;
netdata_ebpf_targets_t fd_targets[] = { {.name = "open", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "close", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/fd.skel.h" // BTF code
static struct fd_bpf *bpf_obj = NULL;
/**
* Disable probe
*
@ -368,20 +359,14 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
/**
 * FD free
 *
 * Release vectors allocated by the file-descriptor thread and mark the module
 * as stopped via em->enabled (the em->thread->enabled field was removed).
 * The BTF object is now the global fd_bpf_obj destroyed centrally at plugin
 * shutdown, so the stale local bpf_obj destroy was dropped.
 *
 * @param em the module structure.
 */
static void ebpf_fd_free(ebpf_module_t *em)
{
    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
    pthread_mutex_unlock(&ebpf_exit_cleanup);

    freez(fd_values);
    freez(fd_vector);

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -398,46 +383,6 @@ static void ebpf_fd_exit(void *ptr)
ebpf_fd_free(em);
}
/*****************************************************************
*
* ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF file descriptor Aral init
*
* Initiallize array allocator that will be used when integration with apps is enabled.
*/
static inline void ebpf_fd_aral_init()
{
ebpf_aral_fd_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_FD_ARAL_NAME, sizeof(netdata_fd_stat_t));
}
/**
* eBPF publish file descriptor get
*
* Get a netdata_fd_stat_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_fd_stat_t *ebpf_fd_stat_get(void)
{
netdata_fd_stat_t *target = aral_mallocz(ebpf_aral_fd_pid);
memset(target, 0, sizeof(netdata_fd_stat_t));
return target;
}
/**
* eBPF file descriptor release
*
* @param stat Release a target after usage.
*/
void ebpf_fd_release(netdata_fd_stat_t *stat)
{
aral_freez(ebpf_aral_fd_pid, stat);
}
/*****************************************************************
*
* MAIN LOOP
@ -1142,17 +1087,16 @@ static int ebpf_fd_load_bpf(ebpf_module_t *em)
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->enabled = CONFIG_BOOLEAN_NO;
ret = -1;
}
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = fd_bpf__open();
if (!bpf_obj)
fd_bpf_obj = fd_bpf__open();
if (!fd_bpf_obj)
ret = -1;
else
ret = ebpf_fd_load_and_attach(bpf_obj, em);
ret = ebpf_fd_load_and_attach(fd_bpf_obj, em);
}
#endif
@ -1182,7 +1126,6 @@ void *ebpf_fd_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_fd_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endfd;
}

View file

@ -85,7 +85,6 @@ void *ebpf_fd_thread(void *ptr);
void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr);
void ebpf_fd_release(netdata_fd_stat_t *stat);
extern struct config fd_config;
extern netdata_fd_stat_t **fd_pid;
extern netdata_ebpf_targets_t fd_targets[];
#endif /* NETDATA_EBPF_FD_H */

View file

@ -329,18 +329,16 @@ void ebpf_filesystem_cleanup_ebpf_data()
static void ebpf_filesystem_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(filesystem_publish_aggregated);
ebpf_filesystem_cleanup_ebpf_data();
if (dimensions)
ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
freez(filesystem_hash_values);
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -570,7 +568,6 @@ void *ebpf_filesystem_thread(void *ptr)
if (em->optional)
info("Netdata cannot monitor the filesystems used on this host.");
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endfilesystem;
}

View file

@ -187,15 +187,11 @@ void ebpf_hardirq_release(hardirq_val_t *stat)
*/
/**
 * Hardirq free
 *
 * Disable the tracepoints enabled at startup and mark the module as stopped.
 * The intermediate STOPPING phase was removed together with the
 * em->thread->enabled field; only the final em->enabled transition remains.
 *
 * @param em the module structure.
 */
static void ebpf_hardirq_free(ebpf_module_t *em)
{
    for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) {
        ebpf_disable_tracepoint(&hardirq_tracepoints[i]);
    }

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -314,7 +310,9 @@ static int hardirq_parse_interrupts(char *irq_name, int irq)
*/
static int hardirq_read_latency_map(int mapfd)
{
hardirq_ebpf_static_val_t hardirq_ebpf_vals[ebpf_nprocs + 1];
static hardirq_ebpf_static_val_t *hardirq_ebpf_vals = NULL;
if (!hardirq_ebpf_vals)
hardirq_ebpf_vals = callocz(ebpf_nprocs + 1, sizeof(hardirq_ebpf_static_val_t));
hardirq_ebpf_key_t key = {};
hardirq_ebpf_key_t next_key = {};
@ -390,7 +388,9 @@ static int hardirq_read_latency_map(int mapfd)
static void hardirq_read_latency_static_map(int mapfd)
{
hardirq_ebpf_static_val_t hardirq_ebpf_static_vals[ebpf_nprocs + 1];
static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
if (!hardirq_ebpf_static_vals)
hardirq_ebpf_static_vals = callocz(ebpf_nprocs + 1, sizeof(hardirq_ebpf_static_val_t));
uint32_t i;
for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
@ -489,9 +489,12 @@ static inline void hardirq_write_static_dims()
/**
* Main loop for this collector.
*
* @param em the main thread structure.
*/
static void hardirq_collector(ebpf_module_t *em)
{
memset(&hardirq_pub, 0, sizeof(hardirq_pub));
avl_init_lock(&hardirq_pub, hardirq_val_cmp);
ebpf_hardirq_aral_init();
@ -549,13 +552,11 @@ void *ebpf_hardirq_thread(void *ptr)
em->maps = hardirq_maps;
if (ebpf_enable_tracepoints(hardirq_tracepoints) == 0) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endhardirq;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endhardirq;
}

View file

@ -46,7 +46,7 @@ static void ebpf_mdflush_free(ebpf_module_t *em)
{
freez(mdflush_ebpf_vals);
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -247,24 +247,19 @@ void *ebpf_mdflush_thread(void *ptr)
char *md_flush_request = ebpf_find_symbol("md_flush_request");
if (!md_flush_request) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
error("Cannot monitor MD devices, because md is not loaded.");
}
freez(md_flush_request);
if (em->thread->enabled == NETDATA_THREAD_EBPF_STOPPED) {
goto endmdflush;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endmdflush;
}
mdflush_collector(em);
endmdflush:
freez(md_flush_request);
ebpf_update_disabled_plugin_stats(em);
netdata_thread_cleanup_pop(1);

View file

@ -18,8 +18,6 @@ struct config mount_config = { .first_section = NULL, .last_section = NULL, .mut
.index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
static netdata_idx_t *mount_values = NULL;
static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];
netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_TRAMPOLINE},
@ -27,10 +25,6 @@ netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_T
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/mount.skel.h" // BTF code
static struct mount_bpf *bpf_obj = NULL;
/*****************************************************************
*
* BTF FUNCTIONS
@ -228,18 +222,7 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_
/**
 * Mount free
 *
 * Mark the module as stopped. The values vector is now a static buffer inside
 * the table reader, and the BTF object is the global mount_bpf_obj destroyed
 * centrally at plugin shutdown, so neither is released here anymore.
 *
 * @param em the module structure.
 */
static void ebpf_mount_free(ebpf_module_t *em)
{
    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -269,6 +252,10 @@ static void ebpf_mount_exit(void *ptr)
*/
static void ebpf_mount_read_global_table()
{
static netdata_idx_t *mount_values = NULL;
if (!mount_values)
mount_values = callocz((size_t)ebpf_nprocs + 1, sizeof(netdata_idx_t));
uint32_t idx;
netdata_idx_t *val = mount_hash_values;
netdata_idx_t *stored = mount_values;
@ -311,7 +298,6 @@ static void ebpf_mount_send_data()
*/
static void mount_collector(ebpf_module_t *em)
{
mount_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(mount_hash_values, 0, sizeof(mount_hash_values));
heartbeat_t hb;
@ -390,17 +376,16 @@ static int ebpf_mount_load_bpf(ebpf_module_t *em)
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->enabled = CONFIG_BOOLEAN_NO;
ret = -1;
}
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = mount_bpf__open();
if (!bpf_obj)
mount_bpf_obj = mount_bpf__open();
if (!mount_bpf_obj)
ret = -1;
else
ret = ebpf_mount_load_and_attach(bpf_obj, em);
ret = ebpf_mount_load_and_attach(mount_bpf_obj, em);
}
#endif
@ -430,7 +415,6 @@ void *ebpf_mount_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_mount_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endmount;
}

View file

@ -47,7 +47,7 @@ static void oomkill_cleanup(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -303,23 +303,24 @@ static void oomkill_collector(ebpf_module_t *em)
continue;
counter = 0;
pthread_mutex_lock(&collect_data_mutex);
pthread_mutex_lock(&lock);
uint32_t count = oomkill_read_data(keys);
if (cgroups && count)
ebpf_update_oomkill_cgroup(keys, count);
if (!count)
continue;
// write everything from the ebpf map.
if (cgroups)
pthread_mutex_lock(&collect_data_mutex);
pthread_mutex_lock(&lock);
if (cgroups) {
ebpf_update_oomkill_cgroup(keys, count);
// write everything from the ebpf map.
ebpf_oomkill_send_cgroup_data(update_every);
}
if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_OOMKILL_CHART);
oomkill_write_data(keys, count);
write_end_chart();
}
pthread_mutex_unlock(&lock);
pthread_mutex_unlock(&collect_data_mutex);
}
@ -364,29 +365,27 @@ void *ebpf_oomkill_thread(void *ptr)
if (unlikely(!ebpf_all_pids || !em->apps_charts)) {
// When we are not running integration with apps, we won't fill necessary variables for this thread to run, so
// we need to disable it.
if (em->thread->enabled)
pthread_mutex_lock(&ebpf_exit_cleanup);
if (em->enabled)
info("%s apps integration is completely disabled.", NETDATA_DEFAULT_OOM_DISABLED_MSG);
pthread_mutex_unlock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endoomkill;
} else if (running_on_kernel < NETDATA_EBPF_KERNEL_4_14) {
if (em->thread->enabled)
pthread_mutex_lock(&ebpf_exit_cleanup);
if (em->enabled)
info("%s kernel does not have necessary tracepoints.", NETDATA_DEFAULT_OOM_DISABLED_MSG);
pthread_mutex_unlock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
}
if (em->thread->enabled == NETDATA_THREAD_EBPF_STOPPED) {
goto endoomkill;
}
if (ebpf_enable_tracepoints(oomkill_tracepoints) == 0) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endoomkill;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endoomkill;
}

View file

@ -42,8 +42,6 @@ static netdata_idx_t *process_hash_values = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
ebpf_process_stat_t **global_process_stats = NULL;
int process_enabled = 0;
bool publish_internal_metrics = true;
@ -607,58 +605,6 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
}
/**
 * Create apps charts
 *
 * Walk the target list and, when at least one target gained processes since
 * the last pass, ask every enabled module with an apps routine to (re)build
 * its per-application charts.
 *
 * @param root a pointer for the targets.
 */
static void ebpf_create_apps_charts(struct ebpf_target *root)
{
    if (unlikely(!ebpf_all_pids))
        return;

    int fresh_targets = 0;
    struct ebpf_target *t;
    for (t = root; t; t = t->next) {
        if (t->target)
            continue;

        // Optional debug dump of the PIDs aggregated under this target.
        if (unlikely(t->processes && (debug_enabled || t->debug_enabled))) {
            fprintf(
                stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", t->name, t->processes,
                (t->processes == 1) ? "" : "es");
            struct ebpf_pid_on_target *pot = t->root_pid;
            while (pot) {
                fprintf(stderr, " %d", pot->pid);
                pot = pot->next;
            }
            fputc('\n', stderr);
        }

        if (!t->exposed && t->processes) {
            fresh_targets++;
            t->exposed = 1;
            if (debug_enabled || t->debug_enabled)
                debug_log_int("%s just added - regenerating charts.", t->name);
        }
    }

    if (!fresh_targets)
        return;

    // Something new appeared: let each active module rebuild its apps charts.
    for (int idx = 0; ebpf_modules[idx].thread_name; idx++) {
        ebpf_module_t *mod = &ebpf_modules[idx];
        if (mod->enabled && mod->apps_charts && mod->apps_routine)
            mod->apps_routine(mod, root);
    }
}
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@ -700,13 +646,13 @@ static void ebpf_process_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_cleanup_publish_syscall(process_publish_aggregated);
freez(process_hash_values);
ebpf_process_disable_tracepoints();
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
process_pid_fd = -1;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -1033,8 +979,7 @@ void ebpf_process_update_cgroup_algorithm()
int i;
for (i = 0; i < NETDATA_KEY_PUBLISH_PROCESS_END; i++) {
netdata_publish_syscall_t *ptr = &process_publish_aggregated[i];
freez(ptr->algorithm);
ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
ptr->algorithm = ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX];
}
}
@ -1078,29 +1023,21 @@ static void process_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int publish_global = em->global_charts;
int cgroups = em->cgroup_charts;
pthread_mutex_lock(&ebpf_exit_cleanup);
int thread_enabled = em->enabled;
process_pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
pthread_mutex_unlock(&ebpf_exit_cleanup);
if (cgroups)
ebpf_process_update_cgroup_algorithm();
int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
int update_every = em->update_every;
int counter = update_every - 1;
int update_apps_list = update_apps_every - 1;
while (!ebpf_exit_plugin) {
usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
if (ebpf_exit_plugin)
break;
pthread_mutex_lock(&collect_data_mutex);
if (++update_apps_list == update_apps_every) {
update_apps_list = 0;
cleanup_exited_pids();
collect_data_for_all_processes(pid_fd);
}
pthread_mutex_unlock(&collect_data_mutex);
if (++counter == update_every) {
counter = 0;
@ -1109,7 +1046,6 @@ static void process_collector(ebpf_module_t *em)
netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
pthread_mutex_lock(&collect_data_mutex);
ebpf_create_apps_charts(apps_groups_root_target);
if (ebpf_all_pids_count > 0) {
if (cgroups && shm_ebpf_cgroup.header) {
ebpf_update_process_cgroup();
@ -1119,7 +1055,7 @@ static void process_collector(ebpf_module_t *em)
pthread_mutex_lock(&lock);
ebpf_send_statistic_data();
if (thread_enabled) {
if (thread_enabled == NETDATA_THREAD_EBPF_RUNNING) {
if (publish_global) {
ebpf_process_send_data(em);
}
@ -1244,10 +1180,12 @@ void *ebpf_process_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = process_maps;
pthread_mutex_lock(&ebpf_exit_cleanup);
if (ebpf_process_enable_tracepoints()) {
em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = CONFIG_BOOLEAN_NO;
em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = NETDATA_THREAD_EBPF_STOPPING;
}
process_enabled = em->enabled;
pthread_mutex_unlock(&ebpf_exit_cleanup);
pthread_mutex_lock(&lock);
ebpf_process_allocate_global_vectors(NETDATA_KEY_PUBLISH_PROCESS_END);
@ -1257,7 +1195,6 @@ void *ebpf_process_thread(void *ptr)
set_local_pointers();
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->enabled = CONFIG_BOOLEAN_NO;
pthread_mutex_unlock(&lock);
goto endprocess;
}
@ -1270,7 +1207,7 @@ void *ebpf_process_thread(void *ptr)
process_aggregated_data, process_publish_aggregated, process_dimension_names, process_id_names,
algorithms, NETDATA_KEY_PUBLISH_PROCESS_END);
if (process_enabled) {
if (process_enabled == NETDATA_THREAD_EBPF_RUNNING) {
ebpf_create_global_charts(em);
}
@ -1289,8 +1226,10 @@ void *ebpf_process_thread(void *ptr)
process_collector(em);
endprocess:
if (!em->enabled)
pthread_mutex_lock(&ebpf_exit_cleanup);
if (em->enabled == NETDATA_THREAD_EBPF_RUNNING)
ebpf_update_disabled_plugin_stats(em);
pthread_mutex_unlock(&ebpf_exit_cleanup);
netdata_thread_cleanup_pop(1);
return NULL;

View file

@ -3,10 +3,6 @@
#include "ebpf.h"
#include "ebpf_shm.h"
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_shm_pid = NULL;
static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" };
static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END];
static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END];
@ -16,8 +12,6 @@ netdata_publish_shm_t *shm_vector = NULL;
static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
static netdata_idx_t *shm_values = NULL;
netdata_publish_shm_t **shm_pid = NULL;
struct config shm_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
@ -45,10 +39,6 @@ netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TR
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/shm.skel.h"
static struct shm_bpf *bpf_obj = NULL;
/*****************************************************************
*
* BTF FUNCTIONS
@ -291,22 +281,11 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
*/
/**
 * SHM free
 *
 * Release vectors allocated by the shared-memory thread and mark the module
 * as stopped. The STOPPING phase was removed with the em->thread->enabled
 * field, and the BTF object is now the global shm_bpf_obj destroyed centrally
 * at plugin shutdown, so the stale local bpf_obj destroy was dropped.
 *
 * @param em the module structure.
 */
static void ebpf_shm_free(ebpf_module_t *em)
{
    freez(shm_vector);
    freez(shm_values);

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -323,46 +302,6 @@ static void ebpf_shm_exit(void *ptr)
ebpf_shm_free(em);
}
/*****************************************************************
*
* ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF shared memory Aral init
*
* Initiallize array allocator that will be used when integration with apps is enabled.
*/
static inline void ebpf_shm_aral_init()
{
ebpf_aral_shm_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SHM_ARAL_NAME, sizeof(netdata_publish_shm_t));
}
/**
* eBPF shared memory get
*
* Get a netdata_publish_shm_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_shm_t *ebpf_shm_stat_get(void)
{
netdata_publish_shm_t *target = aral_mallocz(ebpf_aral_shm_pid);
memset(target, 0, sizeof(netdata_publish_shm_t));
return target;
}
/**
* eBPF shared memory release
*
* @param stat Release a target after usage.
*/
void ebpf_shm_release(netdata_publish_shm_t *stat)
{
aral_freez(ebpf_aral_shm_pid, stat);
}
/*****************************************************************
* COLLECTOR THREAD
*****************************************************************/
@ -1051,17 +990,16 @@ static int ebpf_shm_load_bpf(ebpf_module_t *em)
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->enabled = CONFIG_BOOLEAN_NO;
ret = -1;
}
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = shm_bpf__open();
if (!bpf_obj)
shm_bpf_obj = shm_bpf__open();
if (!shm_bpf_obj)
ret = -1;
else
ret = ebpf_shm_load_and_attach(bpf_obj, em);
ret = ebpf_shm_load_and_attach(shm_bpf_obj, em);
}
#endif
@ -1091,7 +1029,6 @@ void *ebpf_shm_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_shm_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endshm;
}

View file

@ -53,8 +53,6 @@ enum shm_counters {
NETDATA_SHM_END
};
extern netdata_publish_shm_t **shm_pid;
void *ebpf_shm_thread(void *ptr);
void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr);
void ebpf_shm_release(netdata_publish_shm_t *stat);

View file

@ -7,7 +7,6 @@
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_socket_pid = NULL;
/*****************************************************************
*
@ -62,7 +61,6 @@ static netdata_idx_t *socket_hash_values = NULL;
static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR];
static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VECTOR];
ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
static ebpf_bandwidth_t *bandwidth_vector = NULL;
pthread_mutex_t nv_mutex;
@ -101,10 +99,6 @@ struct netdata_static_thread socket_threads = {
};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/socket.skel.h" // BTF code
static struct socket_bpf *bpf_obj = NULL;
/**
* Disable Probe
*
@ -433,46 +427,6 @@ static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_modul
}
#endif
/*****************************************************************
*
* ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF socket Aral init
*
* Initiallize array allocator that will be used when integration with apps is enabled.
*/
static inline void ebpf_socket_aral_init()
{
ebpf_aral_socket_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_NAME, sizeof(ebpf_socket_publish_apps_t));
}
/**
* eBPF socket get
*
* Get a ebpf_socket_publish_apps_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void)
{
ebpf_socket_publish_apps_t *target = aral_mallocz(ebpf_aral_socket_pid);
memset(target, 0, sizeof(ebpf_socket_publish_apps_t));
return target;
}
/**
* eBPF socket release
*
* @param stat Release a target after usage.
*/
void ebpf_socket_release(ebpf_socket_publish_apps_t *stat)
{
aral_freez(ebpf_aral_socket_pid, stat);
}
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@ -498,7 +452,6 @@ static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr)
* Clean socket plot
*
* Clean the allocated data for inbound and outbound vectors.
*/
static void clean_allocated_socket_plot()
{
if (!network_viewer_opt.enabled)
@ -520,12 +473,12 @@ static void clean_allocated_socket_plot()
}
clean_internal_socket_plot(&plot[outbound_vectors.last]);
}
*/
/**
* Clean network ports allocated during initialization.
*
* @param ptr a pointer to the link list.
*/
static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
{
if (unlikely(!ptr))
@ -538,6 +491,7 @@ static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
ptr = next;
}
}
*/
/**
* Clean service names
@ -545,7 +499,6 @@ static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
* Clean the allocated link list that stores names.
*
* @param names the link list.
*/
static void clean_service_names(ebpf_network_viewer_dim_name_t *names)
{
if (unlikely(!names))
@ -558,12 +511,12 @@ static void clean_service_names(ebpf_network_viewer_dim_name_t *names)
names = next;
}
}
*/
/**
* Clean hostnames
*
* @param hostnames the hostnames to clean
*/
static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames)
{
if (unlikely(!hostnames))
@ -577,19 +530,7 @@ static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames)
hostnames = next;
}
}
/**
* Cleanup publish syscall
*
* @param nps list of structures to clean
*/
void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps)
{
while (nps) {
freez(nps->algorithm);
nps = nps->next;
}
}
/**
* Clean port Structure
@ -640,15 +581,8 @@ static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
*/
static void ebpf_socket_free(ebpf_module_t *em )
{
pthread_mutex_lock(&ebpf_exit_cleanup);
if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
return;
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(socket_publish_aggregated);
/* We can have thousands of sockets to clean, so we are transferring
* for OS the responsibility while we do not use ARAL here
freez(socket_hash_values);
freez(bandwidth_vector);
@ -660,25 +594,17 @@ static void ebpf_socket_free(ebpf_module_t *em )
clean_port_structure(&listen_ports);
ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0;
clean_network_ports(network_viewer_opt.included_port);
clean_network_ports(network_viewer_opt.excluded_port);
clean_service_names(network_viewer_opt.names);
clean_hostnames(network_viewer_opt.included_hostnames);
clean_hostnames(network_viewer_opt.excluded_hostnames);
*/
pthread_mutex_destroy(&nv_mutex);
freez(socket_threads.thread);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
socket_bpf__destroy(bpf_obj);
#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -692,8 +618,10 @@ static void ebpf_socket_free(ebpf_module_t *em )
static void ebpf_socket_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
pthread_mutex_lock(&nv_mutex);
if (socket_threads.thread)
netdata_thread_cancel(*socket_threads.thread);
pthread_mutex_unlock(&nv_mutex);
ebpf_socket_free(em);
}
@ -706,8 +634,7 @@ static void ebpf_socket_exit(void *ptr)
*/
void ebpf_socket_cleanup(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_socket_free(em);
UNUSED(ptr);
}
/*****************************************************************
@ -2200,10 +2127,11 @@ void *ebpf_socket_read_hash(void *ptr)
heartbeat_init(&hb);
int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd;
int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd;
while (!ebpf_exit_plugin) {
// This thread is cancelled from another thread
for (;;) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin)
continue;
break;
pthread_mutex_lock(&nv_mutex);
ebpf_read_socket_hash_table(fd_ipv4, AF_INET);
@ -2838,8 +2766,7 @@ void ebpf_socket_update_cgroup_algorithm()
int i;
for (i = 0; i < NETDATA_MAX_SOCKET_VECTOR; i++) {
netdata_publish_syscall_t *ptr = &socket_publish_aggregated[i];
freez(ptr->algorithm);
ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
ptr->algorithm = ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX];
}
}
@ -3938,11 +3865,11 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = socket_bpf__open();
if (!bpf_obj)
socket_bpf_obj = socket_bpf__open();
if (!socket_bpf_obj)
ret = -1;
else
ret = ebpf_socket_load_and_attach(bpf_obj, em);
ret = ebpf_socket_load_and_attach(socket_bpf_obj, em);
}
#endif
@ -3972,7 +3899,6 @@ void *ebpf_socket_thread(void *ptr)
parse_table_size_options(&socket_config);
if (pthread_mutex_init(&nv_mutex, NULL)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
error("Cannot initialize local mutex");
goto endsocket;
}
@ -3995,7 +3921,6 @@ void *ebpf_socket_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_socket_load_bpf(em)) {
em->enabled = CONFIG_BOOLEAN_NO;
pthread_mutex_unlock(&lock);
goto endsocket;
}

View file

@ -366,9 +366,7 @@ void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connect
void parse_network_viewer_section(struct config *cfg);
void ebpf_fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
void parse_service_name_section(struct config *cfg);
void ebpf_socket_release(ebpf_socket_publish_apps_t *stat);
extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
extern struct config socket_config;
extern netdata_ebpf_targets_t socket_targets[];

View file

@ -64,7 +64,7 @@ static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
static void ebpf_softirq_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
@ -73,7 +73,7 @@ static void ebpf_softirq_free(ebpf_module_t *em)
freez(softirq_ebpf_vals);
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -209,13 +209,11 @@ void *ebpf_softirq_thread(void *ptr)
em->maps = softirq_maps;
if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endsoftirq;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endsoftirq;
}

View file

@ -7,12 +7,10 @@ static char *swap_dimension_name[NETDATA_SWAP_END] = { "read", "write" };
static netdata_syscall_stat_t swap_aggregated_data[NETDATA_SWAP_END];
static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END];
netdata_publish_swap_t *swap_vector = NULL;
static netdata_idx_t swap_hash_values[NETDATA_SWAP_END];
static netdata_idx_t *swap_values = NULL;
netdata_publish_swap_t **swap_pid = NULL;
netdata_publish_swap_t *swap_vector = NULL;
struct config swap_config = { .first_section = NULL,
.last_section = NULL,
@ -39,10 +37,6 @@ netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/swap.skel.h" // BTF code
static struct swap_bpf *bpf_obj = NULL;
/**
* Disable probe
*
@ -224,21 +218,11 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t
*/
static void ebpf_swap_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(swap_publish_aggregated);
freez(swap_vector);
freez(swap_values);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
swap_bpf__destroy(bpf_obj);
#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -829,7 +813,6 @@ void *ebpf_swap_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_swap_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endswap;
}

View file

@ -42,8 +42,6 @@ enum swap_counters {
NETDATA_SWAP_END
};
extern netdata_publish_swap_t **swap_pid;
void *ebpf_swap_thread(void *ptr);
void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr);

View file

@ -204,16 +204,12 @@ void ebpf_sync_cleanup_objects()
*/
static void ebpf_sync_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
#ifdef LIBBPF_MAJOR_VERSION
ebpf_sync_cleanup_objects();
#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -523,7 +519,6 @@ void *ebpf_sync_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_sync_initialize_syscall(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endsync;
}

View file

@ -5,10 +5,6 @@
#include "ebpf.h"
#include "ebpf_vfs.h"
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_vfs_pid = NULL;
static char *vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_END] = { "delete", "read", "write",
"fsync", "open", "create" };
static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_read", "vfs_write",
@ -17,7 +13,6 @@ static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_re
static netdata_idx_t *vfs_hash_values = NULL;
static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END];
static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END];
netdata_publish_vfs_t **vfs_pid = NULL;
netdata_publish_vfs_t *vfs_vector = NULL;
static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
@ -50,10 +45,6 @@ netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/vfs.skel.h" // BTF code
static struct vfs_bpf *bpf_obj = NULL;
/**
* Disable probe
*
@ -386,46 +377,6 @@ static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *e
}
#endif
/*****************************************************************
*
* ARAL FUNCTIONS
*
*****************************************************************/
/**
* eBPF VFS Aral init
*
* Initiallize array allocator that will be used when integration with apps is enabled.
*/
static inline void ebpf_vfs_aral_init()
{
ebpf_aral_vfs_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_VFS_ARAL_NAME, sizeof(netdata_publish_vfs_t));
}
/**
* eBPF publish VFS get
*
* Get a netdata_publish_vfs_t entry to be used with a specific PID.
*
* @return it returns the address on success.
*/
netdata_publish_vfs_t *ebpf_vfs_get(void)
{
netdata_publish_vfs_t *target = aral_mallocz(ebpf_aral_vfs_pid);
memset(target, 0, sizeof(netdata_publish_vfs_t));
return target;
}
/**
* eBPF VFS release
*
* @param stat Release a target after usage.
*/
void ebpf_vfs_release(netdata_publish_vfs_t *stat)
{
aral_freez(ebpf_aral_vfs_pid, stat);
}
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@ -441,20 +392,11 @@ void ebpf_vfs_release(netdata_publish_vfs_t *stat)
*/
static void ebpf_vfs_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
freez(vfs_hash_values);
freez(vfs_vector);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
vfs_bpf__destroy(bpf_obj);
#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@ -1911,11 +1853,11 @@ static int ebpf_vfs_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
bpf_obj = vfs_bpf__open();
if (!bpf_obj)
vfs_bpf_obj = vfs_bpf__open();
if (!vfs_bpf_obj)
ret = -1;
else
ret = ebpf_vfs_load_and_attach(bpf_obj, em);
ret = ebpf_vfs_load_and_attach(vfs_bpf_obj, em);
}
#endif
@ -1946,7 +1888,6 @@ void *ebpf_vfs_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_vfs_load_bpf(em)) {
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endvfs;
}

View file

@ -167,8 +167,6 @@ enum netdata_vfs_calls_name {
NETDATA_VFS_END_LIST
};
extern netdata_publish_vfs_t **vfs_pid;
void *ebpf_vfs_thread(void *ptr);
void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr);
void ebpf_vfs_release(netdata_publish_vfs_t *stat);

View file

@ -441,7 +441,7 @@ void ebpf_update_stats(ebpf_plugin_stats_t *report, ebpf_module_t *em)
report->threads++;
// It is not necessary to report more information.
if (!em->enabled)
if (em->enabled != NETDATA_THREAD_EBPF_RUNNING)
return;
report->running++;

View file

@ -260,10 +260,17 @@ typedef enum netdata_apps_integration_flags {
#define NETDATA_EBPF_STAT_DIMENSION_MEMORY "memory"
#define NETDATA_EBPF_STAT_DIMENSION_ARAL "aral"
enum ebpf_threads_status {
NETDATA_THREAD_EBPF_RUNNING,
NETDATA_THREAD_EBPF_STOPPING,
NETDATA_THREAD_EBPF_STOPPED,
NETDATA_THREAD_EBPF_NOT_RUNNING
};
typedef struct ebpf_module {
const char *thread_name;
const char *config_name;
int enabled;
enum ebpf_threads_status enabled;
void *(*start_routine)(void *);
int update_every;
int global_charts;