0
0
Fork 0
mirror of https://github.com/netdata/netdata.git synced 2025-04-10 16:17:36 +00:00
netdata_netdata/src/ml/ml_enums.h
Costa Tsaousis 5f72d4279b
Streaming improvements No 3 ()
* ML uses synchronous queries

* do not call malloc_trim() to free memory, since it locks everything

* Reschedule dimensions for training from worker threads.

* when we collect or read from the database, it is SAMPLES. When we generate points for a chart is POINTS

* keep the receiver send buffer 10x the default

* support autoscaling stream circular buffers

* nd_poll() prefers sending data vs receiving data - in an attempt to dequeue as soon as possible

* fix last commit

* allow removing receiver and senders inline, if the stream thread is not working on them

* fix logs

* Revert "nd_poll() prefers sending data vs receiving data - in an attempt to dequeue as soon as possible"

This reverts commit 51539a97da.

* do not access receiver or sender after it has been removed

* open cache hot2clean

* open cache hot2clean does not need flushing

* use aral for extent pages up to 65k

* track aral malloc and mmap allocations separately; add 8192 as a possible value to PGD

* do not evict too frequently if not needed

* fix aral metrics

* fix aral metrics again

* accurate accounting of memory for dictionaries, strings, labels and MRG

* log during shutdown the progress of dbengine flushing

* move metasync shutdown after dbengine

* max iterations per I/O events

* max iterations per I/O events - break the loop

* max iterations per I/O events - break the loop - again

* disable inline evictions for all caches

* when writing to sockets, send everything that can be sent

* cleanup code to trigger evictions

* fix calculation of eviction size

* fix calculation of eviction size once more

* fix calculation of eviction size once more - again

* ml and replication stop while backfilling is running

* process opcodes while draining the sockets; log with limit when asking to disconnect a node

* fix log

* ml stops when replication queries are running

* report pgd_padding to pulse

* aral precise memory accounting

* removed all alignas() and fix the 2 issues that resulted in unaligned memory accesses (one in mqtt and another in streaming)

* remove the bigger sizes from PGD, but keep multiples of gorilla buffers

* exclude judy from sanitizers

* use 16 bytes alignment on 32 bit machines

* internal check about memory alignment

* experiment: do not allow more children to connect while there is backfilling or replication queries running

* when the node is initializing, retry in 30 seconds

* connector cleanup and isolation of control logic about enabling/disabling various parts

* stop also health queries while backfilling is running

* tuning

* drain the input

* improve interactivity when suspending

* more interactive stream_control

* debug logs to find the connection issue

* abstracted everything about stream control

* Add ml_host_{start,stop} again.

* Do not create/update anomaly-detection charts when ML is not running for a host.

* rrdhost flag RECEIVER_DISCONNECTED has been reversed to COLLECTOR_ONLINE and has been used for localhost and virtual hosts too, to have a single point of truth about the availability of collected data or not

* ml_host_start() and ml_host_stop() are used by streaming receivers; ml_host_start() is used for localhost and virtual hosts

* fixed typo

* allow up to 3 backfills at a time

* add throttling based on user queries

* restore cache line paddings

* unify streaming logs to make it easier to grep logs

* tuning of stream_control

* more logs unification

* use mallocz_release_as_much_memory_to_the_system() under extreme conditions

* do not rely on the response code of evict_pages()

* log the gap of the database every time a node is connected

* updated ram requirements

---------

Co-authored-by: vkalintiris <vasilis@netdata.cloud>
2024-12-11 18:02:17 +02:00

66 lines
1.8 KiB
C

// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_ML_ENUMS_H
#define NETDATA_ML_ENUMS_H
// Classification of a dimension's behavior, used to decide whether
// model training is worthwhile.
enum ml_metric_type {
    // Every collected value is identical; nothing to learn, training is skipped.
    METRIC_TYPE_CONSTANT = 0,

    // Values change over time; a model needs to be generated for this dimension.
    METRIC_TYPE_VARIABLE = 1,
};

// Returns a human-readable name for the given metric type.
const char *ml_metric_type_to_string(enum ml_metric_type mt);
// Whether ML (training/prediction) is active for a given chart.
enum ml_machine_learning_status {
    // Training and prediction are enabled.
    MACHINE_LEARNING_STATUS_ENABLED = 0,

    // Disabled because a configured exclusion pattern matched the chart's id.
    MACHINE_LEARNING_STATUS_DISABLED_DUE_TO_EXCLUDED_CHART = 1,
};

// Returns a human-readable name for the given machine-learning status.
const char *ml_machine_learning_status_to_string(enum ml_machine_learning_status mls);
// Training state of a single dimension's model.
enum ml_training_status {
    // No model exists for this dimension yet.
    TRAINING_STATUS_UNTRAINED = 0,

    // A valid, up-to-date model exists.
    TRAINING_STATUS_TRAINED = 1,

    // A valid, up-to-date model exists, but it has been silenced
    // because it is too noisy.
    TRAINING_STATUS_SILENCED = 2,
};

// Returns a human-readable name for the given training status.
const char *ml_training_status_to_string(enum ml_training_status ts);
// Outcome of a single ML worker training attempt for a dimension.
enum ml_worker_result {
    // A KMeans model was created successfully.
    ML_WORKER_RESULT_OK = 0,

    // The DB could not be queried with a correct time range.
    ML_WORKER_RESULT_INVALID_QUERY_TIME_RANGE = 1,

    // The DB query returned too few values to run KMeans.
    ML_WORKER_RESULT_NOT_ENOUGH_COLLECTED_VALUES = 2,

    // The acquired dimension was null.
    ML_WORKER_RESULT_NULL_ACQUIRED_DIMENSION = 3,

    // The chart is currently being replicated; training is deferred.
    ML_WORKER_RESULT_CHART_UNDER_REPLICATION = 4,
};

// Returns a human-readable name for the given worker result.
// NOTE: parameter renamed from the stale `tr` ("training result") to `wr`
// for consistency with the other prototypes in this header (mt, mls, ts, qit);
// prototype parameter names are documentation only, so callers are unaffected.
const char *ml_worker_result_to_string(enum ml_worker_result wr);
// Kind of work item placed on the ML worker queue.
enum ml_queue_item_type {
    // Train a brand-new model for a dimension.
    ML_QUEUE_ITEM_TYPE_CREATE_NEW_MODEL = 0,

    // Add an already-existing model for a dimension.
    ML_QUEUE_ITEM_TYPE_ADD_EXISTING_MODEL = 1,

    // Ask the worker to stop.
    // NOTE(review): name lacks the `_TYPE_` infix its siblings carry;
    // left as-is since renaming would break existing callers.
    ML_QUEUE_ITEM_STOP_REQUEST = 2,
};

// Returns a human-readable name for the given queue item type.
const char *ml_queue_item_type_to_string(enum ml_queue_item_type qit);
#endif /* NETDATA_ML_ENUMS_H */