// Source: libnetdata/onewayalloc/onewayalloc.c, from the netdata project
// (mirror of https://github.com/netdata/netdata.git).
// Last commit: 0e230a260e (vkalintiris), 2023-08-03 13:13:36 +03:00
//   Revert "Refactor RRD code. ()" ()
//   This reverts commit 440bd51e08.
//   dbengine was still being used for non-zero tiers
//   even on non-dbengine modes.
#include "onewayalloc.h"
// https://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html
#define OWA_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)
// A one-way-allocator arena is a singly linked list of OWA_PAGE buffers.
// Each page is a single heap allocation; the OWA_PAGE header lives at the
// beginning of the buffer and caller allocations are carved from the rest.
// The first page of the list doubles as the arena handle (ONEWAYALLOC *);
// the stats_* fields and 'last' are maintained only on that head page.
typedef struct owa_page {
    size_t stats_pages;          // number of pages in the arena (head page only)
    size_t stats_pages_size;     // total bytes allocated for pages (head page only)
    size_t stats_mallocs_made;   // number of caller allocations served (head page only)
    size_t stats_mallocs_size;   // total bytes requested by callers (head page only)
    size_t size;                 // the total size of the page
    size_t offset;               // the first free byte of the page
    struct owa_page *next;       // the next page on the list
    struct owa_page *last;       // the last page on the list - we currently allocate on this
} OWA_PAGE;
// Running total of bytes held by all one-way allocators in the process.
static size_t onewayalloc_total_memory = 0;

// Report the process-wide memory currently held by one-way allocators.
size_t onewayalloc_allocated_memory(void) {
    size_t total = __atomic_load_n(&onewayalloc_total_memory, __ATOMIC_RELAXED);
    return total;
}
// allocations need to be aligned to CPU register width
// https://en.wikipedia.org/wiki/Data_structure_alignment
static inline size_t natural_alignment(size_t size) {
    // round 'size' up to the next multiple of OWA_NATURAL_ALIGNMENT
    size_t remainder = size % OWA_NATURAL_ALIGNMENT;
    return remainder ? size + (OWA_NATURAL_ALIGNMENT - remainder) : size;
}
// Create an OWA
// Once it is created, the caller may call the onewayalloc_mallocz()
// any number of times, for any amount of memory.
//
// Allocates a new page for an arena. When 'head' is NULL a brand new
// arena is started and the returned page becomes its head; otherwise the
// page is appended to head's list and head's statistics are updated.
// 'size_hint' is the minimum usable payload the new page must provide.
static OWA_PAGE *onewayalloc_create_internal(OWA_PAGE *head, size_t size_hint) {
    // detect the hardware page size once per process
    static size_t OWA_NATURAL_PAGE_SIZE = 0;
    if(unlikely(!OWA_NATURAL_PAGE_SIZE)) {
        long int page_size = sysconf(_SC_PAGE_SIZE);
        if (unlikely(page_size == -1))
            OWA_NATURAL_PAGE_SIZE = 4096; // fall back to the most common page size
        else
            OWA_NATURAL_PAGE_SIZE = page_size;
    }

    // our default page size
    size_t size = OWA_NATURAL_PAGE_SIZE;

    // make sure the new page will fit both the requested size
    // and the OWA_PAGE structure at its beginning
    size_hint += natural_alignment(sizeof(OWA_PAGE));

    // prefer the user size if it is bigger than our size
    if(size_hint > size) size = size_hint;

    // try to allocate half of the total we have allocated already
    if(likely(head)) {
        size_t optimal_size = head->stats_pages_size / 2;
        if(optimal_size > size) size = optimal_size;
    }

    // Make sure our allocations are always a multiple of the hardware page size
    if(size % OWA_NATURAL_PAGE_SIZE) size = size + OWA_NATURAL_PAGE_SIZE - (size % OWA_NATURAL_PAGE_SIZE);

    // OWA_PAGE *page = (OWA_PAGE *)netdata_mmap(NULL, size, MAP_ANONYMOUS|MAP_PRIVATE, 0);
    // if(unlikely(!page)) fatal("Cannot allocate onewayalloc buffer of size %zu", size);
    OWA_PAGE *page = (OWA_PAGE *)mallocz(size);
    __atomic_add_fetch(&onewayalloc_total_memory, size, __ATOMIC_RELAXED);

    page->size = size;
    // the page header itself occupies the beginning of the page
    page->offset = natural_alignment(sizeof(OWA_PAGE));
    page->next = page->last = NULL;

    if(unlikely(!head)) {
        // this is the first time we are called
        head = page;
        head->stats_pages = 0;
        head->stats_pages_size = 0;
        head->stats_mallocs_made = 0;
        head->stats_mallocs_size = 0;
    }
    else {
        // link this page into our existing linked list
        head->last->next = page;
    }

    head->last = page;
    head->stats_pages++;
    head->stats_pages_size += size;

    return page;
}
ONEWAYALLOC *onewayalloc_create(size_t size_hint) {
return (ONEWAYALLOC *)onewayalloc_create_internal(NULL, size_hint);
}
// Allocate 'size' bytes from the arena. The returned memory is not zeroed
// and cannot be freed individually - it is released only when the whole
// arena is destroyed with onewayalloc_destroy().
void *onewayalloc_mallocz(ONEWAYALLOC *owa, size_t size) {
#ifdef FSANITIZE_ADDRESS
    return mallocz(size);
#endif

    OWA_PAGE *head = (OWA_PAGE *)owa;
    OWA_PAGE *page = head->last;

    // update stats
    head->stats_mallocs_made++;
    head->stats_mallocs_size += size;

    // make sure the size is aligned
    size = natural_alignment(size);

    size_t available = page->size - page->offset;
    if(unlikely(available < size)) {
        // the current page cannot fit this request - open a new page,
        // large enough for it
        size_t hint = (size > page->size) ? size : page->size;
        page = onewayalloc_create_internal(head, hint);
    }

    char *base = (char *)page;
    void *mem = &base[page->offset];
    page->offset += size;

    return mem;
}
// Allocate a zero-initialized array of 'nmemb' elements of 'size' bytes
// each from the arena.
void *onewayalloc_callocz(ONEWAYALLOC *owa, size_t nmemb, size_t size) {
    // guard against nmemb * size overflowing size_t - the original code
    // would wrap around and silently return a too-small buffer; instead,
    // saturate the request so the underlying allocator fails loudly
    // (SIZE_MAX / 2 cannot be satisfied on any real system and survives
    // the alignment arithmetic without wrapping)
    size_t total;
    if(unlikely(size && nmemb > SIZE_MAX / size))
        total = SIZE_MAX / 2;
    else
        total = nmemb * size;

    void *mem = onewayalloc_mallocz(owa, total);
    memset(mem, 0, total);
    return mem;
}
// Duplicate the nul-terminated string 's' into the arena.
char *onewayalloc_strdupz(ONEWAYALLOC *owa, const char *s) {
    size_t bytes = strlen(s) + 1; // include the terminating '\0'
    char *copy = onewayalloc_mallocz(owa, bytes);
    return memcpy(copy, s, bytes);
}
// Copy 'size' bytes from 'src' into a fresh arena allocation.
void *onewayalloc_memdupz(ONEWAYALLOC *owa, const void *src, size_t size) {
    void *copy = onewayalloc_mallocz(owa, size);
    // memcpy() is way faster than strcpy() since it does not check for '\0'
    return memcpy(copy, src, size);
}
// Freeing an individual one-way allocation is a no-op: memory is reclaimed
// only by onewayalloc_destroy(). This function exists so callers can keep
// symmetric allocate/free call sites.
void onewayalloc_freez(ONEWAYALLOC *owa __maybe_unused, const void *ptr __maybe_unused) {
#ifdef FSANITIZE_ADDRESS
    // under the address sanitizer every allocation is a real mallocz(),
    // so really free it
    freez((void *)ptr);
    return;
#endif

#ifdef NETDATA_INTERNAL_CHECKS
    // allow the caller to call us for a mallocz() allocation
    // so try to find it in our memory and if it is not there
    // log an error
    if (unlikely(!ptr))
        return;

    OWA_PAGE *head = (OWA_PAGE *)owa;
    OWA_PAGE *page;
    uintptr_t seeking = (uintptr_t)ptr;

    for(page = head; page ;page = page->next) {
        uintptr_t start = (uintptr_t)page;
        uintptr_t end = start + page->size;

        // 'end' is one past the last byte of the page, so a pointer into
        // this page must be strictly below it (the original '<= end' would
        // wrongly claim ownership of the one-past-the-end address)
        if(seeking >= start && seeking < end) {
            // found it - it is ours
            // just return to let the caller think we actually did something
            return;
        }
    }

    // not found - it is not ours; log it so the misuse is visible
    netdata_log_error("ONEWAYALLOC: request to free address 0x%p that is not allocated by this OWA", ptr);
#endif
    return;
}
// Grow an arena allocation of 'oldsize' bytes to twice that size,
// copying the old contents into the new block. The old block itself is
// never reclaimed (onewayalloc_freez() is a no-op on arena memory).
void *onewayalloc_doublesize(ONEWAYALLOC *owa, const void *src, size_t oldsize) {
    void *grown = onewayalloc_mallocz(owa, oldsize * 2);
    memcpy(grown, src, oldsize);
    onewayalloc_freez(owa, src); // kept for call-site symmetry
    return grown;
}
// Release every page of the arena and update the global accounting.
// Safe to call with NULL.
void onewayalloc_destroy(ONEWAYALLOC *owa) {
    if(!owa) return;

    OWA_PAGE *head = (OWA_PAGE *)owa;

    //netdata_log_info("OWA: %zu allocations of %zu total bytes, in %zu pages of %zu total bytes",
    //     head->stats_mallocs_made, head->stats_mallocs_size,
    //     head->stats_pages, head->stats_pages_size);

    size_t released = 0;
    for(OWA_PAGE *page = head; page ;) {
        OWA_PAGE *next = page->next; // read before freeing the page
        released += page->size;
        // munmap(page, page->size);
        freez(page);
        page = next;
    }

    __atomic_sub_fetch(&onewayalloc_total_memory, released, __ATOMIC_RELAXED);
}