
Add message queue statistics ()

* Add IPC message queue charts

* Add obsolete flag for dimensions

* Delete obsolete dimensions from memory

* Remove files for obsolete dimensions, filter requests

* Make empty charts obsolete

* Minimize obsolete dimension checks

* Limit the number of dimensions in memory

* Remove obsolete dimensions on netdata exit

* Update documentation

* Move flag to the end

* Fix typo

* Fix typo
Vladimir Kobal 2019-02-11 13:24:24 +02:00 committed by GitHub
parent cbe45897de
commit 2f6f8155db
14 changed files with 431 additions and 167 deletions
backends/prometheus
collectors
daemon/config
database
streaming
web/api
exporters/shell
formatters


@ -291,7 +291,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
// for each dimension
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rd->collections_counter) {
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
char dimension[PROMETHEUS_ELEMENT_MAX + 1];
char *suffix = "";


@ -55,8 +55,8 @@
#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 990 // freebsd only
#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1000 // freebsd only
#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1100 // freebsd only
#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1000
#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1100
#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1000
#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1000
#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1000 // freebsd only


@ -15,7 +15,7 @@ plugin|language|O/S|description
[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom made modules to be included. Writing modules for these plugins is easier than accessing the native netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
Each of these modular plugins has its own methods for defining modules. Please check the examples and their documentation.
## Motivation
@ -49,9 +49,9 @@ Plugins can create any number of charts with any number of dimensions each. Each
## Configuration
Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user supplied) and `NETDATA_STOCK_CONFIG_DIR` (for netdata supplied) configuration files to identify the directory where configuration files are stored. It is up to the plugin to read the configuration it needs.
The [plugins] section of `netdata.conf` contains a list of all the plugins found on the system where netdata runs, with a boolean setting to enable them or not.
Example:
@ -59,7 +59,7 @@ Example:
[plugins]
# enable running new plugins = yes
# check for new plugins every = 60
# charts.d = yes
# fping = yes
# node.d = yes
@ -70,7 +70,7 @@ The setting `enable running new plugins` changes the default behavior for all ex
So if set to `no`, only the plugins that are explicitly set to `yes` will be run.
The setting `check for new plugins every` controls the time the directory `/usr/libexec/netdata/plugins.d`
will be rescanned for new plugins. So, new plugins can be added anytime.
For each of the external plugins enabled, another `netdata.conf` section
is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
@ -82,7 +82,7 @@ For example, for `apps.plugin` the following section is available:
```
[plugin:apps]
# update every = 1
# command options =
```
- `update every` controls the granularity of the external plugin.
@ -193,7 +193,7 @@ the template is:
is used to group charts together
(for example all eth0 charts should say: eth0),
if empty or missing, the `id` part of `type.id` will be used
this controls the sub-menu on the dashboard
- `context`
@ -233,7 +233,7 @@ the template is:
the template is:
> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
> DIMENSION id [name [algorithm [multiplier [divisor [options]]]]]
where:
@ -283,10 +283,9 @@ the template is:
an integer value to divide the collected value,
if empty or missing, `1` is used
- `hidden`
- `options`
giving the keyword `hidden` will make this dimension hidden,
it will take part in the calculations but will not be presented in the chart
a space separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (netdata will delete it after some time) and `hidden` to make this dimension hidden; it will take part in the calculations but will not be presented in the chart.
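For example (an illustrative snippet, not part of the commit — the dimension ids are hypothetical), a plugin could mark a dimension it no longer collects as obsolete, and another one as hidden, the next time it sends the chart definition:

```
DIMENSION queue_1 queue_1 absolute 1 1
DIMENSION queue_2 queue_2 absolute 1 1 "hidden"
DIMENSION queue_3 queue_3 absolute 1 1 "obsolete"
```

netdata would keep collecting `queue_2` without showing it on the chart, and would remove `queue_3` from memory (and, depending on the daemon configuration, its file from disk) after the obsolescence timeout.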
#### VARIABLE
@ -390,7 +389,7 @@ or do not output the line at all.
4. **C**
Of course, C is the most efficient way of collecting data. This is why netdata itself is written in C.
## Writing Plugins Properly
There are a few rules for writing plugins properly:
@ -411,7 +410,7 @@ There are a few rules for writing plugins properly:
var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
readConfiguration();
if(!verifyWeCanCollectValues()) {
print "DISABLE";
exit(1);
@ -445,7 +444,7 @@ There are a few rules for writing plugins properly:
sleepMilliseconds(next_run - now);
now = currentTimeStampInMilliseconds();
}
/* calculate the time passed since the last run */
if ( loops > 0 )
dt_since_last_run = (now - last_run) * 1000; /* in microseconds */


@ -394,10 +394,17 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int
rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN);
rrddim_flag_clear(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
if(options && *options) {
if(strstr(options, "obsolete") != NULL)
rrddim_is_obsolete(st, rd);
else
rrddim_isnot_obsolete(st, rd);
if(strstr(options, "hidden") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN);
if(strstr(options, "noreset") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
if(strstr(options, "nooverflow") != NULL) rrddim_flag_set(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS);
}
else {
rrddim_isnot_obsolete(st, rd);
}
}
else if(likely(hash == VARIABLE_HASH && !strcmp(s, PLUGINSD_KEYWORD_VARIABLE))) {
char *name = words[1];


@ -20,6 +20,7 @@
- `/proc/loadavg` (system load and total processes running)
- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
- `/sys/class/power_supply` (power supply properties)
- `ipc` (IPC semaphores and message queues)
- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
- `netdata` (internal netdata resources utilization)
@ -343,4 +344,18 @@ corresponding `min` or `empty` attribute, then Netdata will still provide
the corresponding `min` or `empty`, which will then always read as zero.
This way, alerts which match on these will still work.
## IPC
This module monitors the number of semaphores, semaphore arrays, the number of messages in message queues, and the amount of memory used by message queues. Since the message queue charts are dynamic, sane limits are applied to the number of dimensions per chart (the limit is configurable).
#### configuration
```
[plugin:proc:ipc]
# semaphore totals = yes
# message queues = yes
# msg filename to monitor = /proc/sysvipc/msg
# max dimensions in memory allowed = 50
```


@ -53,6 +53,18 @@ union semun {
};
#endif
struct message_queue {
unsigned long long id;
int found;
RRDDIM *rd_messages;
RRDDIM *rd_bytes;
unsigned long long messages;
unsigned long long bytes;
struct message_queue * next;
};
static inline int ipc_sem_get_limits(struct ipc_limits *lim) {
static procfile *ff = NULL;
static int error_shown = 0;
@ -162,102 +174,291 @@ static inline int ipc_sem_get_status(struct ipc_status *st) {
return 0;
}
int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) {
static procfile *ff;
struct message_queue *msq;
if(unlikely(!ff)) {
ff = procfile_open(config_get("plugin:proc:ipc", "msg filename to monitor", msg_filename), " \t:", PROCFILE_FLAG_DEFAULT);
if(unlikely(!ff)) return 1;
}
ff = procfile_readall(ff);
if(unlikely(!ff)) return 1;
size_t lines = procfile_lines(ff);
size_t words = 0;
if(unlikely(lines < 2)) {
error("Cannot read %s. Expected 2 or more lines, read %zu.", ff->filename, lines);
return 1;
}
// loop through all lines except the first and the last ones
size_t l;
for(l = 1; l < lines - 1; l++) {
words = procfile_linewords(ff, l);
if(unlikely(words < 2)) continue;
if(unlikely(words < 14)) {
error("Cannot read %s line. Expected 14 params, read %zu.", ff->filename, words);
continue;
}
// find the id in the linked list or create a new structure
int found = 0;
unsigned long long id = str2ull(procfile_lineword(ff, l, 1));
for(msq = *message_queue_root; msq ; msq = msq->next) {
if(unlikely(id == msq->id)) {
found = 1;
break;
}
}
if(unlikely(!found)) {
msq = callocz(1, sizeof(struct message_queue));
msq->next = *message_queue_root;
*message_queue_root = msq;
msq->id = id;
}
msq->messages = str2ull(procfile_lineword(ff, l, 4));
msq->bytes = str2ull(procfile_lineword(ff, l, 3));
msq->found = 1;
}
return 0;
}
int do_ipc(int update_every, usec_t dt) {
(void)dt;
static int initialized = 0, read_limits_next = -1;
static int do_sem = -1, do_msg = -1;
static int read_limits_next = -1;
static struct ipc_limits limits;
static struct ipc_status status;
static RRDVAR *arrays_max = NULL, *semaphores_max = NULL;
static RRDSET *st_semaphores = NULL, *st_arrays = NULL;
static RRDDIM *rd_semaphores = NULL, *rd_arrays = NULL;
static char *msg_filename = NULL;
static struct message_queue *message_queue_root = NULL;
static long long dimensions_limit;
if(unlikely(do_sem == -1)) {
do_sem = config_get_boolean("plugin:proc:ipc", "semaphore totals", CONFIG_BOOLEAN_YES);
do_msg = config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES);
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sysvipc/msg");
msg_filename = config_get("plugin:proc:ipc", "msg filename to monitor", filename);
dimensions_limit = config_get_number("plugin:proc:ipc", "max dimensions in memory allowed", 50);
if(unlikely(!initialized)) {
initialized = 1;
// make sure it works
if(ipc_sem_get_limits(&limits) == -1) {
error("unable to fetch semaphore limits");
return 1;
do_sem = CONFIG_BOOLEAN_NO;
}
// make sure it works
if(ipc_sem_get_status(&status) == -1) {
else if(ipc_sem_get_status(&status) == -1) {
error("unable to fetch semaphore statistics");
return 1;
}
// create the charts
if(unlikely(!st_semaphores)) {
st_semaphores = rrdset_create_localhost(
"system"
, "ipc_semaphores"
, NULL
, "ipc semaphores"
, NULL
, "IPC Semaphores"
, "semaphores"
, PLUGIN_PROC_NAME
, "ipc"
, NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES
, localhost->rrd_update_every
, RRDSET_TYPE_AREA
);
rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
if(unlikely(!st_arrays)) {
st_arrays = rrdset_create_localhost(
"system"
, "ipc_semaphore_arrays"
, NULL
, "ipc semaphores"
, NULL
, "IPC Semaphore Arrays"
, "arrays"
, PLUGIN_PROC_NAME
, "ipc"
, NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS
, localhost->rrd_update_every
, RRDSET_TYPE_AREA
);
rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
// variables
semaphores_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_max");
arrays_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_arrays_max");
}
if(unlikely(read_limits_next < 0)) {
if(unlikely(ipc_sem_get_limits(&limits) == -1)) {
error("Unable to fetch semaphore limits.");
do_sem = CONFIG_BOOLEAN_NO;
}
else {
if(semaphores_max) rrdvar_custom_host_variable_set(localhost, semaphores_max, limits.semmns);
if(arrays_max) rrdvar_custom_host_variable_set(localhost, arrays_max, limits.semmni);
// create the charts
if(unlikely(!st_semaphores)) {
st_semaphores = rrdset_create_localhost(
"system"
, "ipc_semaphores"
, NULL
, "ipc semaphores"
, NULL
, "IPC Semaphores"
, "semaphores"
, PLUGIN_PROC_NAME
, "ipc"
, NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES
, localhost->rrd_update_every
, RRDSET_TYPE_AREA
);
rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
st_arrays->red = limits.semmni;
st_semaphores->red = limits.semmns;
if(unlikely(!st_arrays)) {
st_arrays = rrdset_create_localhost(
"system"
, "ipc_semaphore_arrays"
, NULL
, "ipc semaphores"
, NULL
, "IPC Semaphore Arrays"
, "arrays"
, PLUGIN_PROC_NAME
, "ipc"
, NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS
, localhost->rrd_update_every
, RRDSET_TYPE_AREA
);
rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
read_limits_next = 60 / update_every;
// variables
semaphores_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_max");
arrays_max = rrdvar_custom_host_variable_create(localhost, "ipc_semaphores_arrays_max");
}
struct stat stbuf;
if (stat(msg_filename, &stbuf)) {
do_msg = CONFIG_BOOLEAN_NO;
}
if(unlikely(do_sem == CONFIG_BOOLEAN_NO && do_msg == CONFIG_BOOLEAN_NO)) {
error("ipc module disabled");
return 1;
}
}
else
read_limits_next--;
if(unlikely(ipc_sem_get_status(&status) == -1)) {
error("Unable to get semaphore statistics");
return 0;
if(likely(do_sem != CONFIG_BOOLEAN_NO)) {
if(unlikely(read_limits_next < 0)) {
if(unlikely(ipc_sem_get_limits(&limits) == -1)) {
error("Unable to fetch semaphore limits.");
}
else {
if(semaphores_max) rrdvar_custom_host_variable_set(localhost, semaphores_max, limits.semmns);
if(arrays_max) rrdvar_custom_host_variable_set(localhost, arrays_max, limits.semmni);
st_arrays->red = limits.semmni;
st_semaphores->red = limits.semmns;
read_limits_next = 60 / update_every;
}
}
else
read_limits_next--;
if(unlikely(ipc_sem_get_status(&status) == -1)) {
error("Unable to get semaphore statistics");
return 0;
}
if(st_semaphores->counter_done) rrdset_next(st_semaphores);
rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem);
rrdset_done(st_semaphores);
if(st_arrays->counter_done) rrdset_next(st_arrays);
rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz);
rrdset_done(st_arrays);
}
if(st_semaphores->counter_done) rrdset_next(st_semaphores);
rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem);
rrdset_done(st_semaphores);
// --------------------------------------------------------------------
if(st_arrays->counter_done) rrdset_next(st_arrays);
rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz);
rrdset_done(st_arrays);
if(likely(do_msg != CONFIG_BOOLEAN_NO)) {
static RRDSET *st_msq_messages = NULL, *st_msq_bytes = NULL;
int ret = ipc_msq_get_info(msg_filename, &message_queue_root);
if(!ret && message_queue_root) {
if(unlikely(!st_msq_messages))
st_msq_messages = rrdset_create_localhost(
"system"
, "message_queue_messages"
, NULL
, "ipc message queues"
, NULL
, "IPC Message Queue Number of Messages"
, "messages"
, PLUGIN_PROC_NAME
, "ipc"
, NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES
, update_every
, RRDSET_TYPE_STACKED
);
else
rrdset_next(st_msq_messages);
if(unlikely(!st_msq_bytes))
st_msq_bytes = rrdset_create_localhost(
"system"
, "message_queue_bytes"
, NULL
, "ipc message queues"
, NULL
, "IPC Message Queue Used Bytes"
, "bytes"
, PLUGIN_PROC_NAME
, "ipc"
, NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE
, update_every
, RRDSET_TYPE_STACKED
);
else
rrdset_next(st_msq_bytes);
struct message_queue *msq = message_queue_root, *msq_prev = NULL;
while(likely(msq)){
if(likely(msq->found)) {
if(unlikely(!msq->rd_messages || !msq->rd_bytes)) {
char id[RRD_ID_LENGTH_MAX + 1];
snprintfz(id, RRD_ID_LENGTH_MAX, "%llu", msq->id);
if(likely(!msq->rd_messages)) msq->rd_messages = rrddim_add(st_msq_messages, id, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
if(likely(!msq->rd_bytes)) msq->rd_bytes = rrddim_add(st_msq_bytes, id, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
rrddim_set_by_pointer(st_msq_messages, msq->rd_messages, msq->messages);
rrddim_set_by_pointer(st_msq_bytes, msq->rd_bytes, msq->bytes);
msq->found = 0;
}
else {
rrddim_is_obsolete(st_msq_messages, msq->rd_messages);
rrddim_is_obsolete(st_msq_bytes, msq->rd_bytes);
// remove message queue from the linked list
if(!msq_prev)
message_queue_root = msq->next;
else
msq_prev->next = msq->next;
freez(msq);
msq = NULL;
}
if(likely(msq)) {
msq_prev = msq;
msq = msq->next;
}
else if(!msq_prev)
msq = message_queue_root;
else
msq = msq_prev->next;
}
rrdset_done(st_msq_messages);
rrdset_done(st_msq_bytes);
long long dimensions_num = 0;
RRDDIM *rd;
rrdset_rdlock(st_msq_messages);
rrddim_foreach_read(rd, st_msq_messages) dimensions_num++;
rrdset_unlock(st_msq_messages);
if(unlikely(dimensions_num > dimensions_limit)) {
info("Message queue statistics has been disabled");
info("There are %lld dimensions in memory but limit was set to %lld", dimensions_num, dimensions_limit);
rrdset_is_obsolete(st_msq_messages);
rrdset_is_obsolete(st_msq_bytes);
st_msq_messages = NULL;
st_msq_bytes = NULL;
do_msg = CONFIG_BOOLEAN_NO;
}
else if(unlikely(!message_queue_root)) {
info("Making chart %s (%s) obsolete since it does not have any dimensions", st_msq_messages->name, st_msq_messages->id);
rrdset_is_obsolete(st_msq_messages);
st_msq_messages = NULL;
info("Making chart %s (%s) obsolete since it does not have any dimensions", st_msq_bytes->name, st_msq_bytes->id);
rrdset_is_obsolete(st_msq_bytes);
st_msq_bytes = NULL;
}
}
}
return 0;
}


@ -12,9 +12,9 @@ This config file **is not needed by default**. Netdata works fine out of the box
2. `[web]` to [configure the web server](../../web/server).
3. `[plugins]` to [configure](#plugins-section-options) which [collectors](../../collectors) to use and PATH settings.
4. `[health]` to [configure](#health-section-options) general settings for [health monitoring](../../health)
5. `[registry]` for the [netdata registry](../../registry).
6. `[backend]` to set up [streaming and replication](../../streaming) options.
7. `[statsd]` for the general settings of the [stats.d.plugin](../../collectors/statsd.plugin).
8. `[plugin:NAME]` sections for each collector plugin, under the comment [Per plugin configuration](#per-plugin-configuration).
9. `[CHART_NAME]` sections for each chart defined, under the comment [Per chart configuration](#per-chart-configuration).
@ -46,33 +46,33 @@ process scheduling policy | `keep` | See [netdata process scheduling policy](..
OOM score | `1000` | See [OOM score](../#oom-score)
glibc malloc arena max for plugins | `1` | See [Virtual memory](../#virtual-memory).
glibc malloc arena max for netdata | `1` | See [Virtual memory](../#virtual-memory).
hostname | auto-detected | The hostname of the computer running netdata.
history | `3996` | The number of entries the netdata daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](../../database/#database) for more information.
update every | `1` | The frequency in seconds, for data collection. For more information see [Performance](../../docs/Performance.md#performance).
config directory | `/etc/netdata` | The directory configuration files are kept.
stock config directory | `/usr/lib/netdata/conf.d` |
log directory | `/var/log/netdata` | The directory in which the [log files](../#log-files) are kept.
web files directory | `/usr/share/netdata/web` | The directory the web static files are kept.
cache directory | `/var/cache/netdata` | The directory the memory database will be stored if and when netdata exits. Netdata will re-read the database when it will start again, to continue from the same point.
lib directory | `/var/lib/netdata` | Contains the alarm log and the netdata instance guid.
home directory | `/var/cache/netdata` | Contains the db files for the collected metrics
plugins directory | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes.
memory mode | `save` | When set to `save` netdata will save its round robin database on exit and load it on startup. When set to `map` the cache files will be updated in real time (check `man mmap` - do not set this on systems with heavy load or slow disks - the disks will continuously sync the in-memory database of netdata). When set to `ram` the round robin database will be temporary and it will be lost when netdata exits. `none` disables the database at this host. This also disables health monitoring (there cannot be health monitoring without a database).
host access prefix | | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this to work. Check [issue 43](https://github.com/netdata/netdata/issues/43).
memory deduplication (ksm) | `yes` | When set to `yes`, netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](../../database/#ksm)
TZ environment variable | `:/etc/localtime` | Where to find the timezone
timezone | auto-detected | The timezone retrieved from the environment variable
debug flags | `0x0000000000000000` | Bitmap of debug options to enable. For more information check [Tracing Options](../#debugging).
debug log | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](../#debugging).
error log | `/var/log/netdata/error.log` | The filename to save error messages for netdata daemon and all plugins (`stderr` is sent here for all netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log.
access log | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log.
errors flood protection period | `1200` | UNUSED - Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`.
errors to trigger flood protection | `200` | UNUSED - Number of errors written to the log in `errors flood protection period` sec before flood protection is activated.
run as user | `netdata` | The user netdata will run as.
pthread stack size | auto-detected |
cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers)
cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions
gap when lost iterations above | `1` |
cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote netdata host (slave) that is no longer sending data.
delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers)
delete obsolete charts files | `yes` | See [monitoring ephemeral containers](../../collectors/cgroups.plugin/#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions
delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal.
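As an illustration (this snippet is not part of the original table), the cleanup settings above are plain `[global]` options in `netdata.conf`; the values shown are just the documented defaults:

```
[global]
    cleanup obsolete charts after seconds = 3600
    delete obsolete charts files = yes
    delete orphan hosts files = yes
```

With this change, the same timeout and file deletion settings also apply to obsolete dimensions.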
### [web] section options
@ -81,40 +81,40 @@ Refer to the [web server documentation](../../web/server)
### [plugins] section options
In this section you will see a boolean (`yes`/`no`) option for each plugin (e.g. tc, cgroups, apps, proc etc.). Note that the configuration options in this section for the orchestrator plugins `python.d`, `charts.d` and `node.d` control **all the modules** written for that orchestrator. For instance, setting `python.d = no` means that all Python modules under `collectors/python.d.plugin` will be disabled.
Additionally, there will be the following options:
setting | default | info
:------:|:-------:|:----
PATH environment variable | `auto-detected` |
PYTHONPATH environment variable | | Used to set a custom python path
enable running new plugins | `yes` | When set to `yes`, netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configured in this file with a `yes`
check for new plugins every | 60 | The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for netdata.
checks | `no` | This is a debugging plugin for the internal latency
### [health] section options
This section controls the general behavior of the health monitoring capabilities of Netdata.
Specific alarms are configured in per-collector config files under the `health.d` directory. For more info, see [health monitoring](../../health/#health-monitoring).
[Alarm notifications](../../health/notifications/#netdata-alarm-notifications) are configured in `health_alarm_notify.conf`.
setting | default | info
:------:|:-------:|:----
enabled | `yes` | Set to `no` to disable all alarms and notifications
in memory max health log entries | 1000 | Size of the alarm history held in RAM
script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alarm notifications.
stock health configuration directory | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alarm configuration files for each collector
health configuration directory | `/etc/netdata/health.d` | The directory containing the user alarm configuration files, to override the stock configurations
run at least every seconds | `10` | Controls how often all alarm conditions should be evaluated.
postpone alarms during hibernation for seconds | `60` | Prevents false alarms. May need to be increased if you get alarms during hibernation.
rotate log every lines | 2000 | Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where <lib directory> is the one configured in the [[global] section](#global-section-options)
### [registry] section options
To understand what this section is and how it should be configured, please refer to the [registry documentation](../../registry).
### [backend]
@ -135,7 +135,7 @@ External plugins will have only 2 options at `netdata.conf`:
setting | default | info
:------:|:-------:|:----
update every|the value of `[global].update every` setting|The frequency in seconds the plugin should collect values. For more information check [Performance](../../docs/Performance.md#performance).
command options|*empty*|Additional command line options to pass to the plugin.
External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their documentation.


@ -31,6 +31,7 @@ typedef struct alarm_entry ALARM_ENTRY;
extern int default_rrd_update_every;
extern int default_rrd_history_entries;
extern int gap_when_lost_iterations_above;
extern time_t rrdset_free_obsolete_time;
#define RRD_ID_LENGTH_MAX 200
@ -123,7 +124,8 @@ typedef struct rrdfamily RRDFAMILY;
typedef enum rrddim_flags {
RRDDIM_FLAG_NONE = 0,
RRDDIM_FLAG_HIDDEN = (1 << 0), // this dimension will not be offered to callers
RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1) // do not offer RESET or OVERFLOW info to callers
RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1), // do not offer RESET or OVERFLOW info to callers
RRDDIM_FLAG_OBSOLETE = (1 << 2) // this is marked by the collector/module as obsolete
} RRDDIM_FLAGS;
#ifdef HAVE_C___ATOMIC
@ -242,21 +244,22 @@ struct rrddim {
// and may lead to missing information.
typedef enum rrdset_flags {
RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
// (the master data set should be the one that has the same family and is not detail)
RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends
RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends
RRDSET_FLAG_UPSTREAM_SEND = 1 << 6, // if set, this chart should be sent upstream (streaming)
RRDSET_FLAG_UPSTREAM_IGNORE = 1 << 7, // if set, this chart should not be sent upstream (streaming)
RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata master (streaming)
RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
RRDSET_FLAG_HOMEGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions are homogeneous
RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
RRDSET_FLAG_OBSOLETE_DIMENSIONS = 1 << 14 // this is marked by the collector/module when a chart has obsolete dimensions
} RRDSET_FLAGS;
#ifdef HAVE_C___ATOMIC
@ -846,6 +849,9 @@ extern RRDDIM *rrddim_find(RRDSET *st, const char *id);
extern int rrddim_hide(RRDSET *st, const char *id);
extern int rrddim_unhide(RRDSET *st, const char *id);
extern void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd);
extern void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd);
extern collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value);
extern collected_number rrddim_set(RRDSET *st, const char *id, collected_number value);
@ -879,6 +885,7 @@ extern void rrdset_free(RRDSET *st);
extern void rrdset_reset(RRDSET *st);
extern void rrdset_save(RRDSET *st);
extern void rrdset_delete(RRDSET *st);
extern void rrdset_delete_obsolete_dimensions(RRDSET *st);
extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host);


@ -368,6 +368,18 @@ int rrddim_unhide(RRDSET *st, const char *id) {
return 0;
}
inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) {
debug(D_RRD_CALLS, "rrddim_is_obsolete() for chart %s, dimension %s", st->name, rd->name);
rrddim_flag_set(rd, RRDDIM_FLAG_OBSOLETE);
rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS);
}
inline void rrddim_isnot_obsolete(RRDSET *st, RRDDIM *rd) {
debug(D_RRD_CALLS, "rrddim_isnot_obsolete() for chart %s, dimension %s", st->name, rd->name);
rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE);
}
// ----------------------------------------------------------------------------
// RRDDIM - collect values for a dimension


@ -665,6 +665,8 @@ void rrdhost_cleanup_charts(RRDHOST *host) {
if(rrdhost_delete_obsolete_charts && rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE))
rrdset_delete(st);
else if(rrdhost_delete_obsolete_charts && rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS))
rrdset_delete_obsolete_dimensions(st);
else
rrdset_save(st);


@ -417,6 +417,24 @@ void rrdset_delete(RRDSET *st) {
recursively_delete_dir(st->cache_dir, "left-over chart");
}
void rrdset_delete_obsolete_dimensions(RRDSET *st) {
RRDDIM *rd;
rrdset_check_rdlock(st);
info("Deleting dimensions of chart '%s' ('%s') from disk...", st->id, st->name);
rrddim_foreach_read(rd, st) {
if(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) {
info("Deleting dimension file '%s'.", rd->cache_filename);
if(unlikely(unlink(rd->cache_filename) == -1))
error("Cannot delete dimension file '%s'", rd->cache_filename);
}
}
}
}
// ----------------------------------------------------------------------------
// RRDSET - create a chart
@ -1303,6 +1321,11 @@ void rrdset_done(RRDSET *st) {
continue;
}
if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE))) {
error("Dimension %s in chart '%s' has the OBSOLETE flag set, but it is collected.", rd->name, st->id);
rrddim_isnot_obsolete(st, rd);
}
#ifdef NETDATA_INTERNAL_CHECKS
rrdset_debug(st, "%s: START "
" last_collected_value = " COLLECTED_NUMBER_FORMAT
@ -1582,37 +1605,37 @@ void rrdset_done(RRDSET *st) {
// ALL DONE ABOUT THE DATA UPDATE
// --------------------------------------------------------------------
/*
// find if there are any obsolete dimensions (not updated recently)
if(unlikely(rrd_delete_unupdated_dimensions)) {
// find if there are any obsolete dimensions
time_t now = now_realtime_sec();
for( rd = st->dimensions; likely(rd) ; rd = rd->next )
if((rd->last_collected_time.tv_sec + (rrd_delete_unupdated_dimensions * st->update_every)) < st->last_collected_time.tv_sec)
if(unlikely(rrddim_flag_check(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS))) {
rrddim_foreach_read(rd, st)
if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)))
break;
if(unlikely(rd)) {
RRDDIM *last;
// there is dimension to free
// there is a dimension to free
// upgrade our read lock to a write lock
rrdset_unlock(st);
rrdset_wrlock(st);
for( rd = st->dimensions, last = NULL ; likely(rd) ; ) {
// remove it only it is not updated in rrd_delete_unupdated_dimensions seconds
if(unlikely((rd->last_collected_time.tv_sec + (rrd_delete_unupdated_dimensions * st->update_every)) < st->last_collected_time.tv_sec)) {
if(unlikely(rd->last_collected_time.tv_sec + rrdset_free_obsolete_time < now)) {
info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rd->name, rd->id, st->name, st->id);
if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) {
info("Deleting dimension file '%s'.", rd->cache_filename);
if(unlikely(unlink(rd->cache_filename) == -1))
error("Cannot delete dimension file '%s'", rd->cache_filename);
}
if(unlikely(!last)) {
st->dimensions = rd->next;
rd->next = NULL;
rrddim_free(st, rd);
rd = st->dimensions;
continue;
}
else {
last->next = rd->next;
rd->next = NULL;
rrddim_free(st, rd);
rd = last->next;
continue;
@ -1622,14 +1645,11 @@ void rrdset_done(RRDSET *st) {
last = rd;
rd = rd->next;
}
if(unlikely(!st->dimensions)) {
info("Disabling chart %s (%s) since it does not have any dimensions", st->name, st->id);
st->enabled = 0;
}
}
else {
rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS);
}
}
*/
rrdset_unlock(st);

View file

@ -188,12 +188,13 @@ static inline void rrdpush_send_chart_definition_nolock(RRDSET *st) {
rrddim_foreach_read(rd, st) {
buffer_sprintf(
host->rrdpush_sender_buffer
, "DIMENSION \"%s\" \"%s\" \"%s\" " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " \"%s %s\"\n"
, "DIMENSION \"%s\" \"%s\" \"%s\" " COLLECTED_NUMBER_FORMAT " " COLLECTED_NUMBER_FORMAT " \"%s %s %s\"\n"
, rd->id
, rd->name
, rrd_algorithm_name(rd->algorithm)
, rd->multiplier
, rd->divisor
, rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)?"obsolete":""
, rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)?"hidden":""
, rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS)?"noreset":""
);
@ -737,16 +738,16 @@ void *rrdpush_sender_thread(void *ptr) {
if(host->rrdpush_sender_socket != -1) {
char *error = NULL;
if (unlikely(ofd->revents & POLLERR))
error = "socket reports errors (POLLERR)";
else if (unlikely(ofd->revents & POLLHUP))
error = "connection closed by remote end (POLLHUP)";
else if (unlikely(ofd->revents & POLLNVAL))
error = "connection is invalid (POLLNVAL)";
if(unlikely(error)) {
debug(D_STREAM, "STREAM: %s - closing socket...", error);
error("STREAM %s [send to %s]: %s - reopening socket - we have sent %zu bytes on this connection.", host->hostname, connected_to, error, sent_bytes_on_this_connection);


@ -39,7 +39,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
// for each dimension
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rd->collections_counter) {
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
char dimension[SHELL_ELEMENT_MAX + 1];
shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX);
@ -126,7 +126,7 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb) {
// for each dimension
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rd->collections_counter) {
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
buffer_sprintf(wb, "%s\n"
"\t\t\t\"%s\": {\n"


@ -51,7 +51,7 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
size_t dimensions = 0;
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) continue;
if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue;
memory += rd->memsize;