
config parsers ()

* added parser for durations (see the standalone sketch after this list)

* preliminary work for timeframes

* Update CMakeLists.txt

* updated parsing and generation for durations

* renames

* report parser errors; added compatibility with the existing config_parse_duration()

* duration parsing is used on most netdata.conf and stream.conf entries

* more uses of duration parsing; simplification of stream.conf

* code cleanup

* more duration changes

* added html playground

* improved js code

* duration parsing applied to dbengine retention

* fixed doc

* simplified logic; added size parser

* added parsing for sizes

* renames and documentation updates

* hide appconfig internals from the rest of netdata

* fix crash on cleanup of streaming receivers

* fix buffer overflow in gorilla compression

* config return values are const

* ksm set to auto

* support reformatting migrated values

* removed obsolete metrics correlations settings

* split appconfig to multiple files

* durations documentation

* sizes documentation

* added backward compatibility in retention configuration

* provide description on migrations and reformattings

* config options are now a double linked list

* config sections are now a double linked list; config uses spinlocks; code cleanup and renames

* added data type to all config options

* update data types

* split appconfig api to multiple files

* code cleanup and renames

* removed size units above PiB

* Revert "fix buffer overflow in gorilla compression"

This reverts commit 3d5c48e84b.

* appconfig internal api changes
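
The duration parser added here (src/libnetdata/parsers/duration.c in the CMakeLists changes below) accepts human-readable unit suffixes; the documentation updates further down use values such as `14d`, `3mo`, `2y`, `15s` and `20ms`, and the size parser accepts values such as `1GiB`. A minimal, standalone sketch of suffix-based duration parsing — illustrative only, with simplified unit handling, not netdata's actual implementation:

```c
// Simplified, standalone illustration of suffix-based duration parsing.
// This is NOT netdata's parser: month/year lengths are approximated and
// the result is truncated to whole seconds (so "20ms" yields 0 seconds).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Returns the duration in seconds, or -1 if the string cannot be parsed.
static long long parse_duration_seconds(const char *s) {
    char *end = NULL;
    double value = strtod(s, &end);
    if (end == s) return -1;                  // no leading number

    while (*end == ' ') end++;                // tolerate "14 d"

    if (!*end || !strcmp(end, "s"))  return (long long)value;
    if (!strcmp(end, "ms")) return (long long)(value / 1000.0);
    if (!strcmp(end, "m"))  return (long long)(value * 60);
    if (!strcmp(end, "h"))  return (long long)(value * 3600);
    if (!strcmp(end, "d"))  return (long long)(value * 86400);
    if (!strcmp(end, "mo")) return (long long)(value * 30 * 86400);   // approximate month
    if (!strcmp(end, "y"))  return (long long)(value * 365 * 86400);  // approximate year

    return -1;                                // unknown unit suffix
}

int main(void) {
    const char *samples[] = { "14d", "3mo", "2y", "15s", "20ms" };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%-5s -> %lld seconds\n", samples[i], parse_duration_seconds(samples[i]));
    return 0;
}
```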
Costa Tsaousis 2024-09-04 14:42:01 +03:00 committed by GitHub
parent d8c5109a00
commit a399128dbf
155 changed files with 3489 additions and 1944 deletions
Changed paths:
- CMakeLists.txt
- README.md
- docs/deployment-guides
- docs/netdata-agent/configuration/optimizing-metrics-database
- docs/netdata-agent/sizing-netdata-agents
- docs/observability-centralization-points/metrics-centralization-points
- integrations
- packaging
- src (aclk, claim, collectors, daemon, database, exporting, health, libnetdata)

View file

@ -716,6 +716,8 @@ set(LIBNETDATA_FILES
src/libnetdata/dictionary/dictionary-callbacks.h
src/libnetdata/linked-lists.h
src/libnetdata/storage-point.h
src/libnetdata/parsers/parsers.h
src/libnetdata/parsers/duration.c
src/libnetdata/bitmap64.h
src/libnetdata/os/gettid.c
src/libnetdata/os/gettid.h
@ -762,6 +764,31 @@ set(LIBNETDATA_FILES
src/libnetdata/paths/paths.c
src/libnetdata/paths/paths.h
src/libnetdata/json/json-c-parser-inline.c
src/libnetdata/parsers/duration.h
src/libnetdata/parsers/timeframe.c
src/libnetdata/parsers/timeframe.h
src/libnetdata/parsers/size.c
src/libnetdata/parsers/size.h
src/libnetdata/libjudy/judy-malloc.c
src/libnetdata/libjudy/judy-malloc.h
src/libnetdata/config/appconfig_internals.h
src/libnetdata/config/appconfig_exporters.c
src/libnetdata/config/appconfig_conf_file.c
src/libnetdata/config/appconfig_cleanup.c
src/libnetdata/config/appconfig_sections.c
src/libnetdata/config/appconfig_options.c
src/libnetdata/config/appconfig_migrate.c
src/libnetdata/config/appconfig_traversal.c
src/libnetdata/config/appconfig_api_sizes.c
src/libnetdata/config/appconfig_api_sizes.h
src/libnetdata/config/appconfig_api_durations.c
src/libnetdata/config/appconfig_api_durations.h
src/libnetdata/config/appconfig_api_numbers.c
src/libnetdata/config/appconfig_api_numbers.h
src/libnetdata/config/appconfig_api_text.c
src/libnetdata/config/appconfig_api_text.h
src/libnetdata/config/appconfig_api_boolean.c
src/libnetdata/config/appconfig_api_boolean.h
)
if(ENABLE_PLUGIN_EBPF)

View file

@ -398,7 +398,7 @@ This is what you should expect:
- For production systems, each Netdata Agent with default settings (everything enabled, ML, Health, DB) should consume about 5% CPU utilization of one core and about 150 MiB of RAM.
By using a Netdata parent and streaming all metrics to that parent, you can disable ML & health and use an ephemeral DB mode (like `alloc`) on the children, leading to utilization of about 1% CPU of a single core and 100 MiB of RAM. Of course, these depend on how many metrics are collected.
By using a Netdata parent and streaming all metrics to that parent, you can disable ML & health and use an ephemeral DB (like `alloc`) on the children, leading to utilization of about 1% CPU of a single core and 100 MiB of RAM. Of course, these depend on how many metrics are collected.
- For Netdata Parents, for about 1 to 2 million metrics, all collected every second, we suggest a server with 16 cores and 32GB RAM. Less than half of it will be used for data collection and ML. The rest will be available for queries.

View file

@ -95,23 +95,18 @@ On the Parent, edit `netdata.conf` by using the [edit-config](/docs/netdata-agen
```yaml
[db]
mode = dbengine
dbengine tier backfill = new
storage tiers = 3
# To allow memory pressure to offload index from ram
dbengine page descriptors in file mapped memory = yes
dbengine page cache size = 1.4GiB
# storage tier 0
update every = 1
dbengine multihost disk space MB = 12000
dbengine page cache size MB = 1400
dbengine tier 0 retention space = 12GiB
# storage tier 1
dbengine tier 1 page cache size MB = 512
dbengine tier 1 multihost disk space MB = 4096
dbengine tier 1 update every iterations = 60
dbengine tier 1 backfill = new
dbengine tier 1 retention space = 4GiB
# storage tier 2
dbengine tier 2 page cache size MB = 128
dbengine tier 2 multihost disk space MB = 2048
dbengine tier 2 update every iterations = 60
dbengine tier 2 backfill = new
dbengine tier 2 retention space = 2GiB
[ml]
# Enabled by default
# enabled = yes

View file

@ -7,9 +7,9 @@ space**. This provides greater control and helps you optimize storage usage for
| Tier | Resolution | Time Limit | Size Limit (min 256 MB) |
|:----:|:-------------------:|:----------:|:-----------------------:|
| 0 | high (per second) | 14 days | 1 GiB |
| 1 | middle (per minute) | 3 months | 1 GiB |
| 2 | low (per hour) | 2 years | 1 GiB |
| 0 | high (per second) | 14d | 1 GiB |
| 1 | middle (per minute) | 3mo | 1 GiB |
| 2 | low (per hour) | 2y | 1 GiB |
> **Note**: If a user sets a disk space size less than 256 MB for a tier, Netdata will automatically adjust it to 256 MB.
@ -17,7 +17,7 @@ With these defaults, Netdata requires approximately 4 GiB of storage space (incl
## Retention Settings
> **In a parent-child setup**, these settings manage the shared storage space utilized by the Netdata parent agent for
> **In a parent-child setup**, these settings manage the shared storage space used by the Netdata parent agent for
> storing metrics collected by both the parent and its child nodes.
You can fine-tune retention for each tier by setting a time limit or size limit. Setting a limit to 0 disables it,
@ -38,16 +38,16 @@ You can change these limits in `netdata.conf`:
storage tiers = 3
# Tier 0, per second data. Set to 0 for no limit.
dbengine tier 0 disk space MB = 1024
dbengine tier 0 retention days = 14
dbengine tier 0 retention size = 1GiB
dbengine tier 0 retention time = 14d
# Tier 1, per minute data. Set to 0 for no limit.
dbengine tier 1 disk space MB = 1024
dbengine tier 1 retention days = 90
dbengine tier 1 retention size = 1GiB
dbengine tier 1 retention time = 3mo
# Tier 2, per hour data. Set to 0 for no limit.
dbengine tier 2 disk space MB = 1024
dbengine tier 2 retention days = 730
dbengine tier 2 retention size = 1GiB
dbengine tier 2 retention time = 2y
```
## Monitoring Retention Utilization
@ -58,6 +58,24 @@ your storage space (disk space limits) and time (time limits) are used for metri
## Legacy configuration
### v1.99.0 and prior
Netdata versions prior to v2 support the following configuration options in `netdata.conf`.
They have the same defaults as the latest v2, but the unit of each value is given in the option name, not in the value.
```
storage tiers = 3
# Tier 0, per second data. Set to 0 for no limit.
dbengine tier 0 disk space MB = 1024
dbengine tier 0 retention days = 14
# Tier 1, per minute data. Set to 0 for no limit.
dbengine tier 1 disk space MB = 1024
dbengine tier 1 retention days = 90
# Tier 2, per hour data. Set to 0 for no limit.
dbengine tier 2 disk space MB = 1024
dbengine tier 2 retention days = 730
```
### v1.45.6 and prior
Netdata versions prior to v1.46.0 relied on disk-space-based retention.
@ -76,13 +94,10 @@ You can change these limits in `netdata.conf`:
[db]
mode = dbengine
storage tiers = 3
# Tier 0, per second data
dbengine multihost disk space MB = 256
# Tier 1, per minute data
dbengine tier 1 multihost disk space MB = 1024
# Tier 2, per hour data
dbengine tier 2 multihost disk space MB = 1024
```
@ -113,6 +128,7 @@ If `dbengine disk space MB`(**deprecated**) is set to the default `256`, each in
which means the total disk space required to store all instances is,
roughly, `256 MiB * (1 parent + 4 child nodes) = 1280 MiB`.
#### Backward compatibility
All existing metrics belonging to child nodes are automatically converted to legacy dbengine instances and the localhost

View file

@ -34,8 +34,8 @@ about 16 GiB
There are 2 cache sizes that can be configured in `netdata.conf`:
1. `[db].dbengine page cache size MB`: this is the main cache that keeps metrics data in memory. When data are not found in it, the extent cache is consulted, and if not found there either, they are loaded from disk.
2. `[db].dbengine extent cache size MB`: this is the compressed extent cache. It keeps compressed data blocks in memory, as they appear on disk, to avoid reading them again. Data found in the extent cache but not in the main cache have to be uncompressed before they can be queried.
1. `[db].dbengine page cache size`: this is the main cache that keeps metrics data in memory. When data are not found in it, the extent cache is consulted, and if not found there either, they are loaded from disk.
2. `[db].dbengine extent cache size`: this is the compressed extent cache. It keeps compressed data blocks in memory, as they appear on disk, to avoid reading them again. Data found in the extent cache but not in the main cache have to be uncompressed before they can be queried.
Both of them are dynamically adjusted to use some of the total memory computed above. The configuration in `netdata.conf` allows providing additional memory to them, increasing their caching efficiency.

View file

@ -45,6 +45,6 @@ The easiest way is to `rsync` the directory `/var/cache/netdata` from the existi
To configure retention at the new Netdata Parent, set the following in `netdata.conf` to at least the values the old Netdata Parent has:
- `[db].dbengine multihost disk space MB`, this is the max disk size for `tier0`. The default is 256MiB.
- `[db].dbengine tier 1 multihost disk space MB`, this is the max disk space for `tier1`. The default is 50% of `tier0`.
- `[db].dbengine tier 2 multihost disk space MB`, this is the max disk space for `tier2`. The default is 50% of `tier1`.
- `[db].dbengine tier 0 retention size`, this is the max disk size for `tier0`. The default is 1GiB.
- `[db].dbengine tier 1 retention size`, this is the max disk space for `tier1`. The default is 1GiB.
- `[db].dbengine tier 2 retention size`, this is the max disk space for `tier2`. The default is 1GiB.

View file

@ -45,13 +45,13 @@ The following `netdata.conf` configuration parameters affect replication.
On the receiving side (Netdata Parent):
- `[db].seconds to replicate` limits the maximum time to be replicated. The default is 1 day (86400 seconds). Keep in mind that replication is also limited by the `tier0` retention the sending side has.
- `[db].replication period` limits the maximum time to be replicated. The default is 1 day. Keep in mind that replication is also limited by the `tier0` retention the sending side has.
On the sending side (Netdata Children, or Netdata Parent when parents are clustered):
- `[db].replication threads` controls how many concurrent threads will be replicating metrics. The default is 1. Usually the performance is about 2 million samples per second per thread, so increasing this number may allow replication to progress faster between Netdata Parents.
- `[db].cleanup obsolete charts after secs` controls how long metrics remain available for replication after they stop being collected. The default is 1 hour (3600 seconds). If you plan scheduled maintenance on Netdata Parents lasting more than 1 hour, we recommend increasing this setting. Keep in mind, however, that increasing this duration in highly ephemeral environments can have an impact on RAM utilization, since metrics will be considered as collected for longer durations.
- `[db].cleanup obsolete charts after` controls how long metrics remain available for replication after they stop being collected. The default is 1 hour (3600 seconds). If you plan scheduled maintenance on Netdata Parents lasting more than 1 hour, we recommend increasing this setting. Keep in mind, however, that increasing this duration in highly ephemeral environments can have an impact on RAM utilization, since metrics will be considered as collected for longer durations.
## Monitoring Replication Progress

View file

@ -1308,7 +1308,7 @@ export const integrations = [
"most_popular": false
},
"overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats file`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n",
"troubleshooting": "",
"alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n",
"metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n",
@ -17655,7 +17655,7 @@ export const integrations = [
"most_popular": false
},
"overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time | Specifies the target time for the data collection thread to sleep. | 20ms | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
"troubleshooting": "",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n",

View file

@ -1306,7 +1306,7 @@
"most_popular": false
},
"overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats file`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
"troubleshooting": "",
"alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n",
"metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n",
@ -17653,7 +17653,7 @@
"most_popular": false
},
"overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time | Specifies the target time for the data collection thread to sleep. | 20ms | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
"troubleshooting": "",
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
"metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n",

View file

@ -2,9 +2,9 @@
enabled = {{ enabled }}
destination = {{ destination }}
api key = {{ api_key }}
timeout seconds = {{ timeout_seconds }}
timeout = {{ timeout_seconds }}
default port = {{ default_port }}
send charts matching = {{ send_charts_matching }}
buffer size bytes = {{ buffer_size_bytes }}
reconnect delay seconds = {{ reconnect_delay_seconds }}
reconnect delay = {{ reconnect_delay_seconds }}
initial clock resync iterations = {{ initial_clock_resync_iterations }}

View file

@ -1,7 +1,7 @@
[{{ api_key }}]
enabled = {{ enabled }}
allow from = {{ allow_from }}
default history = {{ default_history }}
retention = {{ default_history }}
health enabled by default = {{ health_enabled_by_default }}
default postpone alarms on connect seconds = {{ default_postpone_alarms_on_connect_seconds }}
postpone alerts on connect = {{ default_postpone_alarms_on_connect_seconds }}
multiple connections = {{ multiple_connections }}

View file

@ -87,9 +87,9 @@ On an existing installation, in order to connect it to Netdata Cloud you will ne
configs:
netdata:
data: |
[global]
memory mode = ram
history = 3600
[db]
db = ram
retention = 3600
[health]
enabled = no
```
@ -103,7 +103,7 @@ On an existing installation, in order to connect it to Netdata Cloud you will ne
> :bookmark_tabs: Info
>
> These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`history = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `history` setting.
> These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`retention = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `retention` setting.
3. To apply these new settings, run:

View file

@ -28,8 +28,8 @@ const struct capability *aclk_get_agent_capas()
agent_capabilities[2].version = ml_capable() ? 1 : 0;
agent_capabilities[2].enabled = ml_enabled(localhost);
agent_capabilities[3].version = enable_metric_correlations ? metric_correlations_version : 0;
agent_capabilities[3].enabled = enable_metric_correlations;
agent_capabilities[3].version = metric_correlations_version;
agent_capabilities[3].enabled = 1;
agent_capabilities[7].enabled = localhost->health.health_enabled;
@ -44,9 +44,7 @@ struct capability *aclk_get_node_instance_capas(RRDHOST *host)
struct capability ni_caps[] = {
{ .name = "proto", .version = 1, .enabled = 1 },
{ .name = "ml", .version = ml_capable(), .enabled = ml_enabled(host) },
{ .name = "mc",
.version = enable_metric_correlations ? metric_correlations_version : 0,
.enabled = enable_metric_correlations },
{ .name = "mc", .version = metric_correlations_version, .enabled = 1 },
{ .name = "ctx", .version = 1, .enabled = 1 },
{ .name = "funcs", .version = functions ? 1 : 0, .enabled = functions ? 1 : 0 },
{ .name = "http_api_v2", .version = HTTP_API_V2_VERSION, .enabled = 1 },

View file

@ -401,18 +401,7 @@ bool claim_agent_from_environment(void) {
}
bool claim_agent_from_claim_conf(void) {
static struct config claim_config = {
.first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = {
.avl_tree = {
.root = NULL,
.compar = appconfig_section_compare
},
.rwlock = AVL_LOCK_INITIALIZER
}
};
static struct config claim_config = APPCONFIG_INITIALIZER;
static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
bool ret = false;

View file

@ -2,18 +2,7 @@
#include "claim.h"
struct config cloud_config = {
.first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = {
.avl_tree = {
.root = NULL,
.compar = appconfig_section_compare
},
.rwlock = AVL_LOCK_INITIALIZER
}
};
struct config cloud_config = APPCONFIG_INITIALIZER;
const char *cloud_config_url_get(void) {
return appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL);
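
The `APPCONFIG_INITIALIZER` macro replaces the open-coded initializers shown above, so callers no longer depend on the structure's internals. A minimal sketch of declaring a private config object and reading from it — hypothetical section and option names, assuming netdata's libnetdata headers; the accessor's argument order and `const` return follow the calls visible in this commit:

```c
// Illustrative only: a private config object using the new initializer macro.
// "my feature" and its option are hypothetical; appconfig_get() presumably
// records the default when the option is missing, as elsewhere in netdata.
static struct config my_config = APPCONFIG_INITIALIZER;

const char *my_feature_url(void) {
    return appconfig_get(&my_config, "my feature", "url", "https://example.invalid");
}
```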

View file

@ -23,7 +23,7 @@ struct cgroup *discovered_cgroup_root = NULL;
char cgroup_chart_id_prefix[] = "cgroup_";
char services_chart_id_prefix[] = "systemd_";
char *cgroups_rename_script = NULL;
const char *cgroups_rename_script = NULL;
// Shared memory with information from detected cgroups
netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL};

View file

@ -273,7 +273,7 @@ struct discovery_thread {
extern struct discovery_thread discovery_thread;
extern char *cgroups_rename_script;
extern const char *cgroups_rename_script;
extern char cgroup_chart_id_prefix[];
extern char services_chart_id_prefix[];
extern uv_mutex_t cgroup_root_mutex;
@ -313,7 +313,7 @@ extern SIMPLE_PATTERN *enabled_cgroup_renames;
extern SIMPLE_PATTERN *systemd_services_cgroups;
extern SIMPLE_PATTERN *entrypoint_parent_process_comm;
extern char *cgroups_network_interface_script;
extern const char *cgroups_network_interface_script;
extern int cgroups_check;

View file

@ -39,7 +39,7 @@ SIMPLE_PATTERN *search_cgroup_paths = NULL;
SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
SIMPLE_PATTERN *systemd_services_cgroups = NULL;
SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL;
char *cgroups_network_interface_script = NULL;
const char *cgroups_network_interface_script = NULL;
int cgroups_check = 0;
uint32_t Read_hash = 0;
uint32_t Write_hash = 0;
@ -229,13 +229,17 @@ void read_cgroup_plugin_configuration() {
throttled_time_hash = simple_hash("throttled_time");
throttled_usec_hash = simple_hash("throttled_usec");
cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every);
if(cgroup_update_every < localhost->rrd_update_every)
cgroup_update_every = (int)config_get_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every);
if(cgroup_update_every < localhost->rrd_update_every) {
cgroup_update_every = localhost->rrd_update_every;
config_set_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every);
}
cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every);
if(cgroup_check_for_new_every < cgroup_update_every)
cgroup_check_for_new_every = (int)config_get_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every);
if(cgroup_check_for_new_every < cgroup_update_every) {
cgroup_check_for_new_every = cgroup_update_every;
config_set_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every);
}
cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO);
if (cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO)
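
The change above is the read-clamp-write-back idiom this commit applies to duration-typed options: the getter parses whatever unit the user wrote, and when the value has to be raised to a minimum, the setter writes the effective value back so `netdata.conf` reflects what is actually in force. A generic sketch of the idiom with a hypothetical helper name:

```c
// Hypothetical helper (not part of netdata) wrapping the idiom used in the
// cgroups and diskspace changes: read a duration option in seconds, enforce
// a minimum, and store the clamped value back into the config.
static int get_update_every_clamped(const char *section, int minimum_seconds) {
    int value = (int)config_get_duration_seconds(section, "update every", minimum_seconds);
    if (value < minimum_seconds) {
        value = minimum_seconds;
        config_set_duration_seconds(section, "update every", value);
    }
    return value;
}
```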

View file

@ -121,16 +121,16 @@ You can also specify per mount point `[plugin:proc:diskspace:mountpoint]`
<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
| update every | Data collection frequency. | 1 | no |
| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |
| check for new mount points every | Parse proc files frequency. | 15 | no |
| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |
| Name | Description | Default | Required |
|:----|:-----------|:---------------------------------------------------------------------------------|:--------:|
| update every | Data collection frequency. | 1 | no |
| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |
| check for new mount points every | Parse proc files frequency. | 15s | no |
| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |
| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |
| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |
| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |
| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |
| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |
| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |
| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |
</details>

View file

@ -63,7 +63,7 @@ modules:
required: false
- name: check for new mount points every
description: Parse proc files frequency.
default_value: 15
default_value: 15s
required: false
- name: exclude space metrics on paths
description: Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern.

View file

@ -855,11 +855,13 @@ void *diskspace_main(void *ptr) {
cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points);
int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
if(update_every < localhost->rrd_update_every)
int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
if(update_every < localhost->rrd_update_every) {
update_every = localhost->rrd_update_every;
config_set_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", update_every);
}
check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
check_for_new_mountpoints_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
if(check_for_new_mountpoints_every < update_every)
check_for_new_mountpoints_every = update_every;

View file

@ -19,11 +19,7 @@ char *ebpf_plugin_dir = PLUGINS_DIR;
static char *ebpf_configured_log_dir = LOG_DIR;
char *ebpf_algorithms[] = { EBPF_CHART_ALGORITHM_ABSOLUTE, EBPF_CHART_ALGORITHM_INCREMENTAL};
struct config collector_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config collector_config = APPCONFIG_INITIALIZER;
int running_on_kernel = 0;
int ebpf_nprocs;
@ -661,7 +657,7 @@ struct vfs_bpf *vfs_bpf_obj = NULL;
#else
void *default_btf = NULL;
#endif
char *btf_path = NULL;
const char *btf_path = NULL;
/*****************************************************************
*
@ -1836,7 +1832,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
*
* @param ptr is a pointer with the text to parse.
*/
void ebpf_parse_ips_unsafe(char *ptr)
void ebpf_parse_ips_unsafe(const char *ptr)
{
// No value
if (unlikely(!ptr))
@ -1927,7 +1923,7 @@ static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_ne
* @param out a pointer to store the link list
* @param service the service used to create the structure that will be linked.
*/
static void ebpf_parse_service_list(void **out, char *service)
static void ebpf_parse_service_list(void **out, const char *service)
{
ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
struct servent *serv = getservbyname((const char *)service, "tcp");
@ -1956,8 +1952,10 @@ static void ebpf_parse_service_list(void **out, char *service)
* @param out a pointer to store the link list
* @param range the informed range for the user.
*/
static void ebpf_parse_port_list(void **out, char *range)
{
static void ebpf_parse_port_list(void **out, const char *range_param) {
char range[strlen(range_param) + 1];
strncpyz(range, range_param, strlen(range_param));
int first, last;
ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
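
Because the config getters now return `const char *` (see the "config return values are const" bullet), parsers that need to tokenize or modify the value first copy it into a local buffer, as `ebpf_parse_port_list()` does above. A small sketch of that pattern, using netdata's `strncpyz()` exactly as in the hunk above:

```c
// Illustrative only: make a mutable copy of a const option value before
// splitting or rewriting it in place.
static void parse_range_copy(const char *range_param) {
    char range[strlen(range_param) + 1];                // VLA-sized mutable copy
    strncpyz(range, range_param, strlen(range_param));  // bounded copy + NUL terminator
    // ... safe to tokenize or modify `range` here ...
    (void)range;
}
```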
@ -2029,7 +2027,7 @@ static void ebpf_parse_port_list(void **out, char *range)
*
* @param ptr is a pointer with the text to parse.
*/
void ebpf_parse_ports(char *ptr)
void ebpf_parse_ports(const char *ptr)
{
// No value
if (unlikely(!ptr))
@ -2480,7 +2478,7 @@ static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_n
* @param out is the output link list
* @param parse is a pointer with the text to parser.
*/
static void ebpf_link_hostnames(char *parse)
static void ebpf_link_hostnames(const char *parse)
{
// No value
if (unlikely(!parse))
@ -2536,7 +2534,7 @@ void parse_network_viewer_section(struct config *cfg)
EBPF_CONFIG_RESOLVE_SERVICE,
CONFIG_BOOLEAN_YES);
char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL);
const char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL);
ebpf_parse_ports(value);
if (network_viewer_opt.hostname_resolution_enabled) {
@ -2684,7 +2682,7 @@ static void ebpf_allocate_common_vectors()
*
* @param ptr the option given by users
*/
static inline void ebpf_how_to_load(char *ptr)
static inline void ebpf_how_to_load(const char *ptr)
{
if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_RETURN))
ebpf_set_thread_mode(MODE_RETURN);
@ -2808,7 +2806,7 @@ static void read_collector_values(int *disable_cgroups,
int update_every, netdata_ebpf_load_mode_t origin)
{
// Read global section
char *value;
const char *value;
if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility
value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load",
EBPF_CFG_LOAD_MODE_DEFAULT);

View file

@ -43,11 +43,7 @@ ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input =
#endif
}};
struct config cachestat_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config cachestat_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE},

View file

@ -12,11 +12,7 @@ netdata_dcstat_pid_t *dcstat_vector = NULL;
static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
static netdata_idx_t *dcstat_values = NULL;
struct config dcstat_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config dcstat_config = APPCONFIG_INITIALIZER;
ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,

View file

@ -6,11 +6,7 @@
#include "ebpf.h"
#include "ebpf_disk.h"
struct config disk_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config disk_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,

View file

@ -46,9 +46,7 @@ static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = N
}};
struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
.index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config fd_config = APPCONFIG_INITIALIZER;
static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
static netdata_idx_t *fd_values = NULL;

View file

@ -2,11 +2,7 @@
#include "ebpf_filesystem.h"
struct config fs_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config fs_config = APPCONFIG_INITIALIZER;
ebpf_local_maps_t ext4_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,

View file

@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_hardirq.h"
struct config hardirq_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config hardirq_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t hardirq_maps[] = {
{

View file

@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_mdflush.h"
struct config mdflush_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config mdflush_config = APPCONFIG_INITIALIZER;
#define MDFLUSH_MAP_COUNT 0
static ebpf_local_maps_t mdflush_maps[] = {

View file

@ -22,9 +22,7 @@ static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umou
static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL];
static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL];
struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
.index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config mount_config = APPCONFIG_INITIALIZER;
static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];

View file

@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_oomkill.h"
struct config oomkill_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config oomkill_config = APPCONFIG_INITIALIZER;
#define OOMKILL_MAP_KILLCNT 0
static ebpf_local_maps_t oomkill_maps[] = {

View file

@ -57,11 +57,7 @@ ebpf_process_stat_t *process_stat_vector = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
struct config process_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config process_config = APPCONFIG_INITIALIZER;
/*****************************************************************
*

View file

@ -12,11 +12,7 @@ netdata_ebpf_shm_t *shm_vector = NULL;
static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
static netdata_idx_t *shm_values = NULL;
struct config shm_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config shm_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,

View file

@ -77,11 +77,7 @@ netdata_socket_t *socket_values;
ebpf_network_viewer_port_list_t *listen_ports = NULL;
ebpf_addresses_t tcp_v6_connect_address = {.function = "tcp_v6_connect", .hash = 0, .addr = 0, .type = 0};
struct config socket_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config socket_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_PROBE},
{.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_PROBE},
@ -2708,7 +2704,7 @@ static void ebpf_socket_initialize_global_vectors()
* @param hash the calculated hash for the dimension name.
* @param name the dimension name.
*/
static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value)
static void ebpf_link_dimension_name(const char *port, uint32_t hash, const char *value)
{
int test = str2i(port);
if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){
@ -2753,15 +2749,15 @@ static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value)
*
* @param cfg the configuration structure
*/
static bool config_service_value_cb(void *data __maybe_unused, const char *name, const char *value) {
ebpf_link_dimension_name(name, simple_hash(name), value);
return true;
}
void ebpf_parse_service_name_section(struct config *cfg)
{
struct section *co = appconfig_get_section(cfg, EBPF_SERVICE_NAME_SECTION);
if (co) {
struct config_option *cv;
for (cv = co->values; cv ; cv = cv->next) {
ebpf_link_dimension_name(cv->name, cv->hash, cv->value);
}
}
appconfig_foreach_value_in_section(cfg, EBPF_SERVICE_NAME_SECTION, config_service_value_cb, NULL);
// Always associated the default port to Netdata
ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
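
The hunk above replaces direct iteration over the section's internal `values` list with the callback-based `appconfig_foreach_value_in_section()`, in line with the "hide appconfig internals from the rest of netdata" bullet. A minimal sketch of the callback shape — the section name is hypothetical, and it assumes (as the code above suggests) that the last argument is handed to the callback as `data` and that returning `true` continues the iteration:

```c
// Illustrative only: count the options of a hypothetical section using the
// callback-style traversal introduced by this commit.
static bool count_option_cb(void *data, const char *name, const char *value) {
    (void)name; (void)value;
    (*(size_t *)data) += 1;
    return true;  // presumably: keep iterating
}

static size_t count_section_options(struct config *cfg) {
    size_t count = 0;
    appconfig_foreach_value_in_section(cfg, "hypothetical section", count_option_cb, &count);
    return count;
}
```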

View file

@ -339,8 +339,8 @@ extern ebpf_network_viewer_port_list_t *listen_ports;
void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values);
void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
void ebpf_parse_service_name_section(struct config *cfg);
void ebpf_parse_ips_unsafe(char *ptr);
void ebpf_parse_ports(char *ptr);
void ebpf_parse_ips_unsafe(const char *ptr);
void ebpf_parse_ports(const char *ptr);
void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em);
void ebpf_socket_fill_publish_apps(ebpf_socket_publish_apps_t *curr, netdata_socket_t *ns);

View file

@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_softirq.h"
struct config softirq_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config softirq_config = APPCONFIG_INITIALIZER;
#define SOFTIRQ_MAP_LATENCY 0
static ebpf_local_maps_t softirq_maps[] = {

View file

@ -12,11 +12,7 @@ static netdata_idx_t *swap_values = NULL;
netdata_ebpf_swap_t *swap_vector = NULL;
struct config swap_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config swap_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,

View file

@ -100,11 +100,7 @@ ebpf_local_maps_t sync_file_range_maps[] = {{.name = "tbl_syncfr", .internal_inp
#endif
}};
struct config sync_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config sync_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NETDATA_SYSCALLS_SYNCFS, .mode = EBPF_LOAD_TRAMPOLINE},

View file

@ -52,11 +52,7 @@ struct netdata_static_thread ebpf_read_vfs = {
.start_routine = NULL
};
struct config vfs_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config vfs_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE},

View file

@ -109,8 +109,8 @@ This integration only supports a single configuration option, and most users wil
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |
|:----|:-----------|:--------|:--------:|
| loop time | Specifies the target time for the data collection thread to sleep. | 20ms | no |
#### Examples
There are no configuration examples.

View file

@ -55,10 +55,10 @@ modules:
title: ''
enabled: false
list:
- name: loop time in ms
- name: loop time
description: >
Specifies the target time for the data collection thread to sleep, measured in milliseconds.
default_value: 20
default_value: 20ms
required: false
examples:
folding:

View file

@ -22,9 +22,9 @@ void *cpuidlejitter_main(void *ptr) {
worker_register("IDLEJITTER");
worker_register_job_name(0, "measurements");
usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
usec_t sleep_ut = config_get_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
if(sleep_ut <= 0) {
config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
config_set_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS);
sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
}

View file

@ -321,7 +321,7 @@ void *pluginsd_main(void *ptr) {
cd->unsafe.enabled = enabled;
cd->unsafe.running = false;
cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every);
cd->update_every = (int)config_get_duration_seconds(cd->id, "update every", localhost->rrd_update_every);
cd->started_t = now_realtime_sec();
char *def = "";

View file

@ -133,7 +133,7 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi
# extended operations for all disks = auto
# backlog for all disks = auto
# bcache for all disks = auto
# bcache priority stats update every = 0
# bcache priority stats update every = off
# remove charts of removed disks = yes
# path to get block device = /sys/block/%s
# path to get block device bcache = /sys/block/%s/bcache
@ -578,7 +578,7 @@ Default configuration will monitor only enabled infiniband ports, and refresh ne
# hardware errors counters = auto
# monitor only ports being active = auto
# disable by default interfaces matching =
# refresh ports state every seconds = 30
# refresh ports state every = 30s
```
## AMD GPUs

View file

@ -182,7 +182,7 @@ static inline int ipc_sem_get_status(struct ipc_status *st) {
return 0;
}
int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) {
static int ipc_msq_get_info(const char *msg_filename, struct message_queue **message_queue_root) {
static procfile *ff;
struct message_queue *msq;
@ -238,7 +238,7 @@ int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_ro
return 0;
}
int ipc_shm_get_info(char *shm_filename, struct shm_stats *shm) {
static int ipc_shm_get_info(const char *shm_filename, struct shm_stats *shm) {
static procfile *ff;
if(unlikely(!ff)) {
@ -287,10 +287,10 @@ int do_ipc(int update_every, usec_t dt) {
static const RRDVAR_ACQUIRED *arrays_max = NULL, *semaphores_max = NULL;
static RRDSET *st_arrays = NULL;
static RRDDIM *rd_arrays = NULL;
static char *msg_filename = NULL;
static const char *msg_filename = NULL;
static struct message_queue *message_queue_root = NULL;
static long long dimensions_limit;
static char *shm_filename = NULL;
static const char *shm_filename = NULL;
if(unlikely(do_sem == -1)) {
do_msg = config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES);

View file

@ -279,7 +279,7 @@ int get_numa_node_count(void)
char name[FILENAME_MAX + 1];
snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
DIR *dir = opendir(dirname);
if (dir) {

View file

@ -180,16 +180,16 @@ static struct disk {
#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st)
static char *path_to_sys_dev_block_major_minor_string = NULL;
static char *path_to_sys_block_device = NULL;
static char *path_to_sys_block_device_bcache = NULL;
static char *path_to_sys_devices_virtual_block_device = NULL;
static char *path_to_device_mapper = NULL;
static char *path_to_dev_disk = NULL;
static char *path_to_sys_block = NULL;
static char *path_to_device_label = NULL;
static char *path_to_device_id = NULL;
static char *path_to_veritas_volume_groups = NULL;
static const char *path_to_sys_dev_block_major_minor_string = NULL;
static const char *path_to_sys_block_device = NULL;
static const char *path_to_sys_block_device_bcache = NULL;
static const char *path_to_sys_devices_virtual_block_device = NULL;
static const char *path_to_device_mapper = NULL;
static const char *path_to_dev_disk = NULL;
static const char *path_to_sys_block = NULL;
static const char *path_to_device_label = NULL;
static const char *path_to_device_id = NULL;
static const char *path_to_veritas_volume_groups = NULL;
static int name_disks_by_id = CONFIG_BOOLEAN_NO;
static int global_bcache_priority_stats_update_every = 0; // disabled by default
@ -1374,7 +1374,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
global_do_ext = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "extended operations for all disks", global_do_ext);
global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog);
global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache);
global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
global_bcache_priority_stats_update_every = (int)config_get_duration_seconds(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks);

View file

@ -89,7 +89,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1,
do_mismatch_config = -1;
static int make_charts_obsolete = -1;
static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
static const char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
static struct raid *raids = NULL;
static size_t raids_allocated = 0;
size_t raids_num = 0, raid_idx = 0, redundant_num = 0;

View file

@ -128,7 +128,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO);
do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO);
update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
update_constants_every = config_get_duration_seconds("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
update_constants_count = update_constants_every;
arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60);

View file

@ -11,7 +11,7 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1;
static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0;
static int read_full = 1;
static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
static const char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
static const RRDVAR_ACQUIRED *rrdvar_max = NULL;
unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0,

View file

@ -208,7 +208,7 @@ int do_proc_net_wireless(int update_every, usec_t dt)
UNUSED(dt);
static procfile *ff = NULL;
static int do_status, do_quality = -1, do_discarded_packets, do_beacon;
static char *proc_net_wireless_filename = NULL;
static const char *proc_net_wireless_filename = NULL;
if (unlikely(do_quality == -1)) {
char filename[FILENAME_MAX + 1];

View file

@ -158,7 +158,7 @@ int do_proc_pressure(int update_every, usec_t dt) {
int i;
static usec_t next_pressure_dt = 0;
static char *base_path = NULL;
static const char *base_path = NULL;
update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? MIN_PRESSURE_UPDATE_EVERY : update_every;
pressure_update_every = update_every;
@ -170,9 +170,8 @@ int do_proc_pressure(int update_every, usec_t dt) {
return 0;
}
if (unlikely(!base_path)) {
if (unlikely(!base_path))
base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure");
}
for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) {
procfile *ff = resource_info[i].pf;

View file

@ -18,7 +18,7 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
static int do_zfs_stats = 0;
static procfile *ff = NULL;
static char *dirname = NULL;
static const char *dirname = NULL;
static ARL_BASE *arl_base = NULL;
arcstats.l2exist = -1;

View file

@ -293,7 +293,7 @@ static void* wake_cpu_thread(void* core) {
return 0;
}
static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
static int read_schedstat(const char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
static size_t cpuidle_charts_len = 0;
static procfile *ff = NULL;
struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address;
@ -373,7 +373,7 @@ static int read_one_state(char *buf, const char *filename, int *fd) {
return 1;
}
static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
static int read_cpuidle_states(const char *cpuidle_name_filename, const char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
char filename[FILENAME_MAX + 1];
static char next_state_filename[FILENAME_MAX + 1];
struct stat stbuf;
@ -484,7 +484,7 @@ int do_proc_stat(int update_every, usec_t dt) {
static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1,
do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1;
static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
static const char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
*time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL;
static const RRDVAR_ACQUIRED *cpus_var = NULL;
static int accurate_freq_avail = 0, accurate_freq_is_used = 0;

View file

@ -5,7 +5,7 @@
int do_proc_uptime(int update_every, usec_t dt) {
(void)dt;
static char *uptime_filename = NULL;
static const char *uptime_filename = NULL;
if(!uptime_filename) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");

View file

@ -837,7 +837,7 @@ int do_sys_class_drm(int update_every, usec_t dt) {
if(unlikely(!drm_dir)) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/drm");
char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename);
const char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename);
if(unlikely(NULL == (drm_dir = opendir(drm_dir_name)))){
collector_error("Cannot read directory '%s'", drm_dir_name);
return 1;

View file

@ -302,7 +302,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
static int initialized = 0;
static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES;
static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1;
static char *sys_class_infiniband_dirname = NULL;
static const char *sys_class_infiniband_dirname = NULL;
static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0;
@ -332,7 +332,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
SIMPLE_PATTERN_EXACT, true);
dt_to_refresh_ports =
config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) *
config_get_duration_seconds(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every", 30) *
USEC_PER_SEC;
if (dt_to_refresh_ports < 0)
dt_to_refresh_ports = 0;

View file

@ -199,7 +199,7 @@ int do_sys_class_power_supply(int update_every, usec_t dt) {
(void)dt;
static int do_capacity = -1, do_power = -1, do_property[3] = {-1};
static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1;
static char *dirname = NULL;
static const char *dirname = NULL;
if(unlikely(do_capacity == -1)) {
do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES);

View file

@ -2,7 +2,7 @@
#include "plugin_proc.h"
static char *pci_aer_dirname = NULL;
static const char *pci_aer_dirname = NULL;
typedef enum __attribute__((packed)) {
AER_DEV_NONFATAL = (1 << 0),

View file

@ -37,7 +37,7 @@ struct mc {
};
static struct mc *mc_root = NULL;
static char *mc_dirname = NULL;
static const char *mc_dirname = NULL;
static void find_all_mc() {
char name[FILENAME_MAX + 1];

View file

@ -15,7 +15,7 @@ static int find_all_nodes() {
int numa_node_count = 0;
char name[FILENAME_MAX + 1];
snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
DIR *dir = opendir(dirname);
if(!dir) {

View file

@ -678,7 +678,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
, do_error_stats = CONFIG_BOOLEAN_AUTO;
static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC;
static char *btrfs_path = NULL;
static const char *btrfs_path = NULL;
(void)dt;
@ -689,7 +689,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs");
btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename);
refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
refresh_every = config_get_duration_seconds("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
refresh_delta = refresh_every;
do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks);

View file

@ -194,9 +194,11 @@ static void profile_main_cleanup(void *pptr) {
extern "C" void *profile_main(void *ptr) {
CLEANUP_FUNCTION_REGISTER(profile_main_cleanup) cleanup_ptr = ptr;
int UpdateEvery = (int) config_get_number(CONFIG_SECTION_PROFILE, "update every", 1);
if (UpdateEvery < localhost->rrd_update_every)
int UpdateEvery = (int) config_get_duration_seconds(CONFIG_SECTION_PROFILE, "update every", 1);
if (UpdateEvery < localhost->rrd_update_every) {
UpdateEvery = localhost->rrd_update_every;
config_set_duration_seconds(CONFIG_SECTION_PROFILE, "update every", UpdateEvery);
}
// pick low default values, in case this plugin is ever enabled accidentally.
size_t NumThreads = config_get_number(CONFIG_SECTION_PROFILE, "number of threads", 2);

View file

@ -170,11 +170,11 @@ You can find the configuration at `/etc/netdata/netdata.conf`:
[statsd]
# enabled = yes
# decimal detail = 1000
# update every (flushInterval) = 1
# update every (flushInterval) = 1s
# udp messages to process at once = 10
# create private charts for metrics matching = *
# max private charts hard limit = 1000
# cleanup obsolete charts after secs = 0
# cleanup obsolete charts after = 0
# private charts memory mode = save
# private charts history = 3996
# histograms and timers percentile (percentThreshold) = 95.00000
@ -204,7 +204,7 @@ You can find the configuration at `/etc/netdata/netdata.conf`:
is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname.
- `update every (flushInterval) = 1` seconds, controls the frequency StatsD will push the collected metrics to Netdata charts.
- `update every (flushInterval) = 1s` controls the frequency StatsD will push the collected metrics to Netdata charts.
- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
@ -238,7 +238,7 @@ The default behavior is to use the same settings as the rest of the Netdata Agen
For optimization reasons, Netdata imposes a hard limit on private metric charts. The limit is set via the `max private charts hard limit` setting (which defaults to 1000 charts). Metrics above this hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
If you have many ephemeral metrics collected (i.e. that you collect values for a certain amount of time), you can set the configuration option `set charts as obsolete after secs`. Setting a value in seconds here, means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after secs`. Setting `set charts as obsolete after secs` to 0 (which is also the default value) will disable this functionality.
If you collect many ephemeral metrics (i.e. metrics that only receive values for a limited amount of time), you can set the configuration option `set charts as obsolete after`. Setting a duration here means that Netdata will mark those metrics (and their private charts) as obsolete once the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after`. Setting `set charts as obsolete after` to 0 (which is also the default value) disables this functionality.
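A minimal sketch of how these two options might be combined in the `[statsd]` section of `netdata.conf` (the values below are illustrative, not the defaults):

```
[statsd]
    # mark private charts as obsolete 10 minutes after the last received value
    set charts as obsolete after = 10m
    # remove charts that have been obsolete for one hour
    cleanup obsolete charts after = 1h
```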
Example private charts (automatically generated without any configuration):

View file

@ -2491,10 +2491,11 @@ void *statsd_main(void *ptr) {
statsd.enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, "statsd", statsd.enabled);
statsd.update_every = default_rrd_update_every;
statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
statsd.update_every = (int)config_get_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
if(statsd.update_every < default_rrd_update_every) {
collector_error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. Using %d", statsd.update_every, default_rrd_update_every);
statsd.update_every = default_rrd_update_every;
config_set_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
}
#ifdef HAVE_RECVMMSG
@ -2504,13 +2505,26 @@ void *statsd_main(void *ptr) {
statsd.charts_for = simple_pattern_create(
config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL,
SIMPLE_PATTERN_EXACT, true);
statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard);
statsd.set_obsolete_after = (size_t)config_get_number(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", (long long)statsd.set_obsolete_after);
statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout);
statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
statsd.max_private_charts_hard =
(size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard);
statsd.set_obsolete_after =
(size_t)config_get_duration_seconds(CONFIG_SECTION_STATSD, "set charts as obsolete after", (long long)statsd.set_obsolete_after);
statsd.decimal_detail =
(collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
statsd.tcp_idle_timeout =
(size_t) config_get_duration_seconds(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after", (long long int)statsd.tcp_idle_timeout);
statsd.private_charts_hidden =
(unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
statsd.histogram_percentile =
(double)config_get_double(
CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
collector_error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile);
statsd.histogram_percentile = 95.0;
@ -2521,7 +2535,8 @@ void *statsd_main(void *ptr) {
statsd.histogram_percentile_str = strdupz(buffer);
}
statsd.dictionary_max_unique = config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique);
statsd.dictionary_max_unique =
config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique);
if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 0)) {
statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;

View file

@ -912,7 +912,7 @@ void *tc_main(void *ptr) {
uint32_t first_hash;
snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir);
char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
const char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
while(service_running(SERVICE_COLLECTORS)) {
struct tc_device *device = NULL;

View file

@ -50,9 +50,11 @@ void *timex_main(void *ptr)
worker_register("TIMEX");
worker_register_job_name(0, "clock check");
int update_every = (int)config_get_number(CONFIG_SECTION_TIMEX, "update every", 10);
if (update_every < localhost->rrd_update_every)
int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_TIMEX, "update every", 10);
if (update_every < localhost->rrd_update_every) {
update_every = localhost->rrd_update_every;
config_set_duration_seconds(CONFIG_SECTION_TIMEX, "update every", update_every);
}
int do_sync = config_get_boolean(CONFIG_SECTION_TIMEX, "clock synchronization state", CONFIG_BOOLEAN_YES);
int do_offset = config_get_boolean(CONFIG_SECTION_TIMEX, "time offset", CONFIG_BOOLEAN_YES);

View file

@ -261,9 +261,8 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message)
const char *conf_file = temp; /* "cloud" is cloud.conf, otherwise netdata.conf */
struct config *tmp_config = strcmp(conf_file, "cloud") ? &netdata_config : &cloud_config;
char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL);
if (value == NULL)
{
const char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL);
if (value == NULL) {
netdata_log_error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set",
conf_file,
temp + offset + 1,
@ -271,13 +270,11 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message)
freez(temp);
return CMD_STATUS_FAILURE;
}
else
{
else {
(*message) = strdupz(value);
freez(temp);
return CMD_STATUS_SUCCESS;
}
}
static cmd_status_t cmd_write_config_execute(char *args, char **message)

View file

@ -2,21 +2,21 @@
#include "common.h"
char *netdata_configured_hostname = NULL;
char *netdata_configured_user_config_dir = CONFIG_DIR;
char *netdata_configured_stock_config_dir = LIBCONFIG_DIR;
char *netdata_configured_log_dir = LOG_DIR;
char *netdata_configured_primary_plugins_dir = PLUGINS_DIR;
char *netdata_configured_web_dir = WEB_DIR;
char *netdata_configured_cache_dir = CACHE_DIR;
char *netdata_configured_varlib_dir = VARLIB_DIR;
char *netdata_configured_lock_dir = VARLIB_DIR "/lock";
char *netdata_configured_cloud_dir = VARLIB_DIR "/cloud.d";
char *netdata_configured_home_dir = VARLIB_DIR;
char *netdata_configured_host_prefix = NULL;
char *netdata_configured_timezone = NULL;
char *netdata_configured_abbrev_timezone = NULL;
int32_t netdata_configured_utc_offset = 0;
const char *netdata_configured_hostname = NULL;
const char *netdata_configured_user_config_dir = CONFIG_DIR;
const char *netdata_configured_stock_config_dir = LIBCONFIG_DIR;
const char *netdata_configured_log_dir = LOG_DIR;
const char *netdata_configured_primary_plugins_dir = PLUGINS_DIR;
const char *netdata_configured_web_dir = WEB_DIR;
const char *netdata_configured_cache_dir = CACHE_DIR;
const char *netdata_configured_varlib_dir = VARLIB_DIR;
const char *netdata_configured_lock_dir = VARLIB_DIR "/lock";
const char *netdata_configured_cloud_dir = VARLIB_DIR "/cloud.d";
const char *netdata_configured_home_dir = VARLIB_DIR;
const char *netdata_configured_host_prefix = NULL;
const char *netdata_configured_timezone = NULL;
const char *netdata_configured_abbrev_timezone = NULL;
int32_t netdata_configured_utc_offset = 0;
bool netdata_ready = false;

View file

@ -6,31 +6,6 @@
#include "libnetdata/libnetdata.h"
#include "libuv_workers.h"
// ----------------------------------------------------------------------------
// shortcuts for the default netdata configuration
#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section)
#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value)
#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
#define config_get_duration(section, name, value) appconfig_get_duration(&netdata_config, section, name, value)
#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value)
#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value)
#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value)
#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new)
#define netdata_conf_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed, true)
#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section)
#define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name)
// ----------------------------------------------------------------------------
// netdata include files
@ -96,20 +71,20 @@
#include "analytics.h"
// global netdata daemon variables
extern char *netdata_configured_hostname;
extern char *netdata_configured_user_config_dir;
extern char *netdata_configured_stock_config_dir;
extern char *netdata_configured_log_dir;
extern char *netdata_configured_primary_plugins_dir;
extern char *netdata_configured_web_dir;
extern char *netdata_configured_cache_dir;
extern char *netdata_configured_varlib_dir;
extern char *netdata_configured_lock_dir;
extern char *netdata_configured_cloud_dir;
extern char *netdata_configured_home_dir;
extern char *netdata_configured_host_prefix;
extern char *netdata_configured_timezone;
extern char *netdata_configured_abbrev_timezone;
extern const char *netdata_configured_hostname;
extern const char *netdata_configured_user_config_dir;
extern const char *netdata_configured_stock_config_dir;
extern const char *netdata_configured_log_dir;
extern const char *netdata_configured_primary_plugins_dir;
extern const char *netdata_configured_web_dir;
extern const char *netdata_configured_cache_dir;
extern const char *netdata_configured_varlib_dir;
extern const char *netdata_configured_lock_dir;
extern const char *netdata_configured_cloud_dir;
extern const char *netdata_configured_home_dir;
extern const char *netdata_configured_host_prefix;
extern const char *netdata_configured_timezone;
extern const char *netdata_configured_abbrev_timezone;
extern int32_t netdata_configured_utc_offset;
extern int netdata_anonymous_statistics_enabled;

View file

@ -86,24 +86,22 @@ Please note that your data history will be lost if you have modified `history` p
### [db] section options
| setting | default | info |
|:---------------------------------------------:|:----------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`. <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. |
| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. |
| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. |
| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier. <br /> `N belongs to [1..4]` |
| dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). |
| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. |
| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well. <br /> `N belongs to [1..4]` |
| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). |
| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the greater by one Tier has N (equal to 60 by default) less data points of any metric it collects. This setting can take values from `2` up to `255`. <br /> `N belongs to [1..4]` |
| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier. <br /> `New`: Sees the latest point on each Tier and save new points to it only if the exact lower Tier has available points for it's observation window (`dbengine tier N update every iterations` window). <br /> `none`: No back filling is applied. <br /> `N belongs to [1..4]` |
| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) |
| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
| gap when lost iterations above | `1` | |
| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
| setting | default | info |
|:---------------------------------------------:|:-------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size` and `dbengine tier X retention size`. <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. |
| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. |
| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
| dbengine page cache size                      | `32MiB`                         | The amount of RAM dedicated to caching _Tier 0_ Netdata metric values. |
| dbengine tier **`N`** retention size | `1GiB` | The disk space dedicated to metrics storage, per tier. Can be used in single-node environments as well. <br /> `N belongs to [1..4]` |
| dbengine tier **`N`** retention time | `14d`, `3mo`, `1y`, `1y`, `1y` | The database retention, expressed in time. Can be used in single-node environments as well. <br /> `N belongs to [1..4]` |
| update every                                  | `1`                             | The frequency, in seconds, of data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics are stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). |
| dbengine tier **`N`** update every iterations | `60`                            | The down-sampling factor of each tier relative to the previous one: each higher tier keeps one data point for every N (60 by default) points of the tier below it. This setting can take values from `2` up to `255`. <br /> `N belongs to [1..4]` |
| dbengine tier back fill                       | `new`                           | Specifies the strategy of recreating missing data on higher database Tiers.<br /> `new`: Sees the latest point on each Tier and saves new points to it only if the exact lower Tier has available points for its observation window (`dbengine tier N update every iterations` window). <br /> `none`: No back filling is applied. <br /> `N belongs to [1..4]` |
| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) |
| cleanup obsolete charts after | `1h` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
| gap when lost iterations above | `1` | |
| cleanup orphan hosts after | `1h` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
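For illustration, a hedged sketch of how the new size- and duration-based retention options might look in the `[db]` section of `netdata.conf` (option names and defaults are taken from the table above; this is not a recommendation):

```
[db]
    storage tiers = 3
    dbengine page cache size = 32MiB

    # per-tier retention limits, expressed as a size and as a time
    dbengine tier 0 retention size = 1GiB
    dbengine tier 0 retention time = 14d
    dbengine tier 1 retention size = 1GiB
    dbengine tier 1 retention time = 3mo
    dbengine tier 2 retention size = 1GiB
    dbengine tier 2 retention time = 1y
```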
> ### Info
>
@ -140,7 +138,7 @@ There are additional configuration options for the logs. For more info, see [Net
| health | `journal` | The filename to save the log of Netdata health collectors. You can also set it to `syslog` to send this log to syslog, or `off` to disable it. Defaults to `journal` if using systemd. |
| daemon | `journal` | The filename to save the log of the Netdata daemon. You can also set it to `syslog` to send this log to syslog, or `off` to disable it. Defaults to `journal` if using systemd. |
| facility | `daemon` | A facility keyword is used to specify the type of system that is logging the message. |
| logs flood protection period | `60` | Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. |
| logs flood protection period | `1m` | Length of period during which the number of errors should not exceed the `errors to trigger flood protection`. |
| logs to trigger flood protection | `1000` | Number of errors written to the log within the `logs flood protection period` before flood protection is activated. |
| level | `info` | Controls which log messages are logged, with error being the most important. Supported values: `info` and `error`. |
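A minimal sketch of the corresponding `[logs]` entries in `netdata.conf`, using the duration format accepted after this change (values are the defaults listed above):

```
[logs]
    logs flood protection period = 1m
    logs to trigger flood protection = 1000
    level = info
```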
@ -172,15 +170,15 @@ monitoring](/src/health/README.md).
[Alert notifications](/src/health/notifications/README.md) are configured in `health_alarm_notify.conf`.
| setting | default | info |
|:----------------------------------------------:|:------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| enabled | `yes` | Set to `no` to disable all alerts and notifications |
| in memory max health log entries | 1000 | Size of the alert history held in RAM |
| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). |
| run at least every seconds | `10` | Controls how often all alert conditions should be evaluated. |
| postpone alarms during hibernation for seconds | `60` | Prevents false alerts. May need to be increased if you get alerts during hibernation. |
| health log history | `432000` | Specifies the history of alert events (in seconds) kept in the agent's sqlite database. |
| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. |
| setting | default | info |
|:--------------------------------------:|:------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| enabled | `yes` | Set to `no` to disable all alerts and notifications |
| in memory max health log entries | 1000 | Size of the alert history held in RAM |
| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). |
| run at least every | `10s` | Controls how often all alert conditions should be evaluated. |
| postpone alarms during hibernation for | `1m` | Prevents false alerts. May need to be increased if you get alerts during hibernation. |
| health log retention | `5d` | Specifies how long the history of alert events is kept in the agent's sqlite database. |
| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. |
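Similarly, a hedged sketch of the renamed `[health]` options with duration values (defaults taken from the table above):

```
[health]
    enabled = yes
    run at least every = 10s
    postpone alarms during hibernation for = 1m
    health log retention = 5d
```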
### [web] section options

View file

@ -58,7 +58,7 @@ static void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool rec
fix_directory_file_permissions(dir, uid, gid, recursive);
}
static void clean_directory(char *dirname)
static void clean_directory(const char *dirname)
{
DIR *dir = opendir(dirname);
if(!dir) return;
@ -188,7 +188,7 @@ static void oom_score_adj(void) {
}
// check the environment
char *s = getenv("OOMScoreAdjust");
const char *s = getenv("OOMScoreAdjust");
if(!s || !*s) {
snprintfz(buf, sizeof(buf) - 1, "%d", (int)wanted_score);
s = buf;

View file

@ -69,7 +69,7 @@ void set_environment_for_plugins_and_scripts(void) {
buffer_free(user_plugins_dirs);
}
char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL);
const char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL);
int clean = 0;
if (!default_port) {
default_port = strdupz("19999");
@ -78,7 +78,7 @@ void set_environment_for_plugins_and_scripts(void) {
nd_setenv("NETDATA_LISTEN_PORT", default_port, 1);
if (clean)
freez(default_port);
freez((char *)default_port);
// set the path we need
char path[4096], *p = getenv("PATH");

View file

@ -4221,9 +4221,11 @@ void *global_statistics_main(void *ptr)
global_statistics_register_workers();
int update_every =
(int)config_get_number(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every);
if (update_every < localhost->rrd_update_every)
(int)config_get_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every);
if (update_every < localhost->rrd_update_every) {
update_every = localhost->rrd_update_every;
config_set_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", update_every);
}
usec_t step = update_every * USEC_PER_SEC;
heartbeat_t hb;
@ -4277,9 +4279,11 @@ void *global_statistics_extended_main(void *ptr)
global_statistics_register_workers();
int update_every =
(int)config_get_number(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every);
if (update_every < localhost->rrd_update_every)
(int)config_get_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every);
if (update_every < localhost->rrd_update_every) {
update_every = localhost->rrd_update_every;
config_set_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", update_every);
}
usec_t step = update_every * USEC_PER_SEC;
heartbeat_t hb;

View file

@ -28,18 +28,7 @@ bool ieee754_doubles = false;
time_t netdata_start_time = 0;
struct netdata_static_thread *static_threads;
struct config netdata_config = {
.first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = {
.avl_tree = {
.root = NULL,
.compar = appconfig_section_compare
},
.rwlock = AVL_LOCK_INITIALIZER
}
};
struct config netdata_config = APPCONFIG_INITIALIZER;
typedef struct service_thread {
pid_t tid;
@ -527,12 +516,12 @@ void web_server_threading_selection(void) {
int make_dns_decision(const char *section_name, const char *config_name, const char *default_value, SIMPLE_PATTERN *p)
{
char *value = config_get(section_name,config_name,default_value);
const char *value = config_get(section_name,config_name,default_value);
if(!strcmp("yes",value))
return 1;
if(!strcmp("no",value))
return 0;
if(strcmp("heuristic",value))
if(strcmp("heuristic",value) != 0)
netdata_log_error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'",
value, section_name, config_name);
@ -542,11 +531,13 @@ int make_dns_decision(const char *section_name, const char *config_name, const c
void web_server_config_options(void)
{
web_client_timeout =
(int)config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", web_client_timeout);
(int)config_get_duration_seconds(CONFIG_SECTION_WEB, "disconnect idle clients after", web_client_timeout);
web_client_first_request_timeout =
(int)config_get_number(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout);
(int)config_get_duration_seconds(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout);
web_client_streaming_rate_t =
config_get_number(CONFIG_SECTION_WEB, "accept a streaming request every seconds", web_client_streaming_rate_t);
config_get_duration_seconds(CONFIG_SECTION_WEB, "accept a streaming request every", web_client_streaming_rate_t);
respect_web_browser_do_not_track_policy =
config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy);
@ -595,7 +586,7 @@ void web_server_config_options(void)
web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip);
char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default");
const char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default");
if(!strcmp(s, "default"))
web_gzip_strategy = Z_DEFAULT_STRATEGY;
else if(!strcmp(s, "filtered"))
@ -842,7 +833,7 @@ static void log_init(void) {
time_t period = ND_LOG_DEFAULT_THROTTLE_PERIOD;
size_t logs = ND_LOG_DEFAULT_THROTTLE_LOGS;
period = config_get_number(CONFIG_SECTION_LOGS, "logs flood protection period", period);
period = config_get_duration_seconds(CONFIG_SECTION_LOGS, "logs flood protection period", period);
logs = (unsigned long)config_get_number(CONFIG_SECTION_LOGS, "logs to trigger flood protection", (long long int)logs);
nd_log_set_flood_protection(logs, period);
@ -886,7 +877,7 @@ static void log_init(void) {
aclk_config_get_query_scope();
}
static char *get_varlib_subdir_from_config(const char *prefix, const char *dir) {
static const char *get_varlib_subdir_from_config(const char *prefix, const char *dir) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s/%s", prefix, dir);
return config_get(CONFIG_SECTION_DIRECTORIES, dir, filename);
@ -894,6 +885,7 @@ static char *get_varlib_subdir_from_config(const char *prefix, const char *dir)
static void backwards_compatible_config() {
// move [global] options to the [web] section
config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog",
CONFIG_SECTION_WEB, "listen backlog");
@ -997,7 +989,10 @@ static void backwards_compatible_config() {
CONFIG_SECTION_PLUGINS, "statsd");
config_move(CONFIG_SECTION_GLOBAL, "memory mode",
CONFIG_SECTION_DB, "mode");
CONFIG_SECTION_DB, "db");
config_move(CONFIG_SECTION_DB, "mode",
CONFIG_SECTION_DB, "db");
config_move(CONFIG_SECTION_GLOBAL, "history",
CONFIG_SECTION_DB, "retention");
@ -1006,7 +1001,13 @@ static void backwards_compatible_config() {
CONFIG_SECTION_DB, "update every");
config_move(CONFIG_SECTION_GLOBAL, "page cache size",
CONFIG_SECTION_DB, "dbengine page cache size MB");
CONFIG_SECTION_DB, "dbengine page cache size");
config_move(CONFIG_SECTION_DB, "dbengine page cache size MB",
CONFIG_SECTION_DB, "dbengine page cache size");
config_move(CONFIG_SECTION_DB, "dbengine extent cache size MB",
CONFIG_SECTION_DB, "dbengine extent cache size");
config_move(CONFIG_SECTION_DB, "page cache size",
CONFIG_SECTION_DB, "dbengine page cache size MB");
@ -1017,30 +1018,6 @@ static void backwards_compatible_config() {
config_move(CONFIG_SECTION_DB, "page cache with malloc",
CONFIG_SECTION_DB, "dbengine page cache with malloc");
config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space",
CONFIG_SECTION_DB, "dbengine disk space MB");
config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space",
CONFIG_SECTION_DB, "dbengine multihost disk space MB");
config_move(CONFIG_SECTION_DB, "dbengine disk space MB",
CONFIG_SECTION_DB, "dbengine multihost disk space MB");
config_move(CONFIG_SECTION_DB, "dbengine multihost disk space MB",
CONFIG_SECTION_DB, "dbengine tier 0 disk space MB");
config_move(CONFIG_SECTION_DB, "dbengine tier 1 multihost disk space MB",
CONFIG_SECTION_DB, "dbengine tier 1 disk space MB");
config_move(CONFIG_SECTION_DB, "dbengine tier 2 multihost disk space MB",
CONFIG_SECTION_DB, "dbengine tier 2 disk space MB");
config_move(CONFIG_SECTION_DB, "dbengine tier 3 multihost disk space MB",
CONFIG_SECTION_DB, "dbengine tier 3 disk space MB");
config_move(CONFIG_SECTION_DB, "dbengine tier 4 multihost disk space MB",
CONFIG_SECTION_DB, "dbengine tier 4 disk space MB");
config_move(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)",
CONFIG_SECTION_DB, "memory deduplication (ksm)");
@ -1054,17 +1031,67 @@ static void backwards_compatible_config() {
CONFIG_SECTION_DB, "dbengine pages per extent");
config_move(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds",
CONFIG_SECTION_DB, "cleanup obsolete charts after secs");
CONFIG_SECTION_DB, "cleanup obsolete charts after");
config_move(CONFIG_SECTION_DB, "cleanup obsolete charts after secs",
CONFIG_SECTION_DB, "cleanup obsolete charts after");
config_move(CONFIG_SECTION_GLOBAL, "gap when lost iterations above",
CONFIG_SECTION_DB, "gap when lost iterations above");
config_move(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds",
CONFIG_SECTION_DB, "cleanup orphan hosts after secs");
CONFIG_SECTION_DB, "cleanup orphan hosts after");
config_move(CONFIG_SECTION_DB, "cleanup orphan hosts after secs",
CONFIG_SECTION_DB, "cleanup orphan hosts after");
config_move(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs",
CONFIG_SECTION_DB, "cleanup ephemeral hosts after");
config_move(CONFIG_SECTION_DB, "seconds to replicate",
CONFIG_SECTION_DB, "replication period");
config_move(CONFIG_SECTION_DB, "seconds per replication step",
CONFIG_SECTION_DB, "replication step");
config_move(CONFIG_SECTION_GLOBAL, "enable zero metrics",
CONFIG_SECTION_DB, "enable zero metrics");
// ----------------------------------------------------------------------------------------------------------------
config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space",
CONFIG_SECTION_DB, "dbengine tier 0 retention size");
config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space",
CONFIG_SECTION_DB, "dbengine tier 0 retention size");
config_move(CONFIG_SECTION_DB, "dbengine disk space MB",
CONFIG_SECTION_DB, "dbengine tier 0 retention size");
for(size_t tier = 0; tier < RRD_STORAGE_TIERS ;tier++) {
char old_config[128], new_config[128];
snprintfz(old_config, sizeof(old_config), "dbengine tier %zu retention days", tier);
snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention time", tier);
config_move(CONFIG_SECTION_DB, old_config,
CONFIG_SECTION_DB, new_config);
if(tier == 0)
snprintfz(old_config, sizeof(old_config), "dbengine multihost disk space MB");
else
snprintfz(old_config, sizeof(old_config), "dbengine tier %zu multihost disk space MB", tier);
snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention size", tier);
config_move(CONFIG_SECTION_DB, old_config,
CONFIG_SECTION_DB, new_config);
snprintfz(old_config, sizeof(old_config), "dbengine tier %zu disk space MB", tier);
snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention size", tier);
config_move(CONFIG_SECTION_DB, old_config,
CONFIG_SECTION_DB, new_config);
}
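For illustration, a minimal sketch (not part of the patch) of what the loop above means for an existing netdata.conf; the sample values are hypothetical, and the exact re-formatting of migrated values depends on the new size parser:

// hypothetical old entries under [db]              migrated to
//   dbengine multihost disk space MB = 1024    ->  dbengine tier 0 retention size
//   dbengine tier 1 multihost disk space MB    ->  dbengine tier 1 retention size
//   dbengine tier 1 retention days             ->  dbengine tier 1 retention time
// the pattern the loop applies for a single tier:
config_move(CONFIG_SECTION_DB, "dbengine tier 1 multihost disk space MB",
            CONFIG_SECTION_DB, "dbengine tier 1 retention size");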
// ----------------------------------------------------------------------------------------------------------------
config_move(CONFIG_SECTION_LOGS, "error",
CONFIG_SECTION_LOGS, "daemon");
@ -1076,11 +1103,42 @@ static void backwards_compatible_config() {
config_move(CONFIG_SECTION_LOGS, "errors flood protection period",
CONFIG_SECTION_LOGS, "logs flood protection period");
config_move(CONFIG_SECTION_HEALTH, "is ephemeral",
CONFIG_SECTION_GLOBAL, "is ephemeral node");
config_move(CONFIG_SECTION_HEALTH, "has unstable connection",
CONFIG_SECTION_GLOBAL, "has unstable connection");
config_move(CONFIG_SECTION_HEALTH, "run at least every seconds",
CONFIG_SECTION_HEALTH, "run at least every");
config_move(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds",
CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for");
config_move(CONFIG_SECTION_HEALTH, "health log history",
CONFIG_SECTION_HEALTH, "health log retention");
config_move(CONFIG_SECTION_REGISTRY, "registry expire idle persons days",
CONFIG_SECTION_REGISTRY, "registry expire idle persons");
config_move(CONFIG_SECTION_WEB, "disconnect idle clients after seconds",
CONFIG_SECTION_WEB, "disconnect idle clients after");
config_move(CONFIG_SECTION_WEB, "accept a streaming request every seconds",
CONFIG_SECTION_WEB, "accept a streaming request every");
config_move(CONFIG_SECTION_STATSD, "set charts as obsolete after secs",
CONFIG_SECTION_STATSD, "set charts as obsolete after");
config_move(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds",
CONFIG_SECTION_STATSD, "disconnect idle tcp clients after");
config_move("plugin:idlejitter", "loop time in ms",
"plugin:idlejitter", "loop time");
config_move("plugin:proc:/sys/class/infiniband", "refresh ports state every seconds",
"plugin:proc:/sys/class/infiniband", "refresh ports state every");
}
static int get_hostname(char *buf, size_t buf_size) {
@ -1125,22 +1183,22 @@ static void get_netdata_configured_variables()
// ------------------------------------------------------------------------
// get default database update frequency
default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_DB, "update every", UPDATE_EVERY);
default_rrd_update_every = (int) config_get_duration_seconds(CONFIG_SECTION_DB, "update every", UPDATE_EVERY);
if(default_rrd_update_every < 1 || default_rrd_update_every > 600) {
netdata_log_error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY);
default_rrd_update_every = UPDATE_EVERY;
config_set_number(CONFIG_SECTION_DB, "update every", default_rrd_update_every);
config_set_duration_seconds(CONFIG_SECTION_DB, "update every", default_rrd_update_every);
}
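As a sketch of the effect of switching to the duration getter (the unit suffixes are an assumption, based on the duration parser this commit introduces), plain numbers keep working while unit-suffixed values are also accepted:

// "update every = 5" and "update every = 5s" are assumed to be equivalent;
// "update every = 2m" would yield 120 seconds
time_t every = config_get_duration_seconds(CONFIG_SECTION_DB, "update every", UPDATE_EVERY);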
// ------------------------------------------------------------------------
// get default memory mode for the database
// get the database selection
{
const char *mode = config_get(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode));
const char *mode = config_get(CONFIG_SECTION_DB, "db", rrd_memory_mode_name(default_rrd_memory_mode));
default_rrd_memory_mode = rrd_memory_mode_id(mode);
if(strcmp(mode, rrd_memory_mode_name(default_rrd_memory_mode)) != 0) {
netdata_log_error("Invalid memory mode '%s' given. Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode));
config_set(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode));
config_set(CONFIG_SECTION_DB, "db", rrd_memory_mode_name(default_rrd_memory_mode));
}
}
@ -1194,17 +1252,19 @@ static void get_netdata_configured_variables()
// ------------------------------------------------------------------------
// get default Database Engine page cache size in MiB
default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb);
default_rrdeng_extent_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine extent cache size MB", default_rrdeng_extent_cache_mb);
default_rrdeng_page_cache_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", default_rrdeng_page_cache_mb);
default_rrdeng_extent_cache_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine extent cache size", default_rrdeng_extent_cache_mb);
db_engine_journal_check = config_get_boolean(CONFIG_SECTION_DB, "dbengine enable journal integrity check", CONFIG_BOOLEAN_NO);
if(default_rrdeng_extent_cache_mb < 0)
if(default_rrdeng_extent_cache_mb < 0) {
default_rrdeng_extent_cache_mb = 0;
config_set_size_mb(CONFIG_SECTION_DB, "dbengine extent cache size", default_rrdeng_extent_cache_mb);
}
if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) {
netdata_log_error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB);
default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
config_set_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb);
config_set_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", default_rrdeng_page_cache_mb);
}
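Similarly, a hedged sketch of the size getter: the option keeps accepting a plain number of MiB, and unit-suffixed values are assumed to be normalized to MiB by the new size parser:

// "dbengine page cache size = 32" and "dbengine page cache size = 32MiB"
// are assumed to both return 32 here
uint64_t cache_mb = config_get_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", 32);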
// ------------------------------------------------------------------------
@ -1237,28 +1297,24 @@ static void get_netdata_configured_variables()
// get KSM settings
#ifdef MADV_MERGEABLE
enable_ksm = config_get_boolean(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm);
enable_ksm = config_get_boolean_ondemand(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm);
#endif
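With the on-demand getter, the KSM option gains an "auto" state in addition to yes/no; a minimal sketch of how a caller could distinguish the three values (the surrounding logic is hypothetical):

int ksm = config_get_boolean_ondemand(CONFIG_SECTION_DB, "memory deduplication (ksm)", CONFIG_BOOLEAN_AUTO);
if(ksm == CONFIG_BOOLEAN_AUTO) {
    // decide at runtime, e.g. enable only when KSM is already active on the host
}
else if(ksm == CONFIG_BOOLEAN_YES) {
    // explicitly enabled by the user
}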
// --------------------------------------------------------------------
// metric correlations
enable_metric_correlations = config_get_boolean(CONFIG_SECTION_GLOBAL, "enable metric correlations", enable_metric_correlations);
default_metric_correlations_method = weights_string_to_method(config_get(
CONFIG_SECTION_GLOBAL, "metric correlations method",
weights_method_to_string(default_metric_correlations_method)));
rrdhost_free_ephemeral_time_s =
config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup ephemeral hosts after", rrdhost_free_ephemeral_time_s);
// --------------------------------------------------------------------
rrdset_free_obsolete_time_s =
config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup obsolete charts after", rrdset_free_obsolete_time_s);
rrdset_free_obsolete_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s);
rrdhost_free_ephemeral_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs", rrdhost_free_ephemeral_time_s);
// Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short
// cleanup delay is set. Extensive stress tests showed that 10 seconds is quite a safe delay. Look at
// https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information.
if (rrdset_free_obsolete_time_s < 10) {
rrdset_free_obsolete_time_s = 10;
netdata_log_info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds.");
config_set_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s);
netdata_log_info("The \"cleanup obsolete charts after\" option was set to 10 seconds.");
config_set_duration_seconds(CONFIG_SECTION_DB, "cleanup obsolete charts after", rrdset_free_obsolete_time_s);
}
gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_DB, "gap when lost iterations above", gap_when_lost_iterations_above);
@ -1278,7 +1334,7 @@ static void get_netdata_configured_variables()
}
static void post_conf_load(char **user)
static void post_conf_load(const char **user)
{
// --------------------------------------------------------------------
// get the user we should run
@ -1293,7 +1349,7 @@ static void post_conf_load(char **user)
}
}
static bool load_netdata_conf(char *filename, char overwrite_used, char **user) {
static bool load_netdata_conf(char *filename, char overwrite_used, const char **user) {
errno_clear();
int ret = 0;
@ -1406,7 +1462,7 @@ bool netdata_random_session_id_generate(void);
int windows_perflib_dump(const char *key);
#endif
int unittest_prepare_rrd(char **user) {
int unittest_prepare_rrd(const char **user) {
post_conf_load(user);
get_netdata_configured_variables();
default_rrd_update_every = 1;
@ -1437,7 +1493,7 @@ int netdata_main(int argc, char **argv) {
int config_loaded = 0;
bool close_open_fds = true;
size_t default_stacksize;
char *user = NULL;
const char *user = NULL;
#ifdef OS_WINDOWS
int dont_fork = 1;
@ -1787,7 +1843,7 @@ int netdata_main(int argc, char **argv) {
// so the caller can use -c netdata.conf before or
// after this parameter to prevent or allow overwriting
// variables at netdata.conf
config_set_default(section, key, value);
config_set_default_raw_value(section, key, value);
// fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
}
@ -1820,7 +1876,7 @@ int netdata_main(int argc, char **argv) {
// so the caller can use -c netdata.conf before or
// after this parameter to prevent or allow overwriting
// variables at netdata.conf
appconfig_set_default(tmp_config, section, key, value);
appconfig_set_default_raw_value(tmp_config, section, key, value);
// fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value);
}
@ -1922,7 +1978,7 @@ int netdata_main(int argc, char **argv) {
// ------------------------------------------------------------------------
// initialize netdata
{
char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1");
const char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1");
if(pmax && *pmax)
setenv("MALLOC_ARENA_MAX", pmax, 1);
@ -1979,7 +2035,7 @@ int netdata_main(int argc, char **argv) {
// --------------------------------------------------------------------
// get the debugging flags from the configuration file
char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000");
const char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000");
nd_setenv("NETDATA_DEBUG_FLAGS", flags, 1);
debug_flags = strtoull(flags, NULL, 0);
@ -2008,8 +2064,6 @@ int netdata_main(int argc, char **argv) {
check_local_streaming_capabilities();
aral_judy_init();
get_system_timezone();
replication_initialize();
@ -2159,7 +2213,7 @@ int netdata_main(int argc, char **argv) {
delta_startup_time("initialize threads after fork");
netdata_threads_init_after_fork((size_t)config_get_number(CONFIG_SECTION_GLOBAL, "pthread stack size", (long)default_stacksize));
netdata_threads_init_after_fork((size_t)config_get_size_bytes(CONFIG_SECTION_GLOBAL, "pthread stack size", default_stacksize));
// initialize internal registry
delta_startup_time("initialize registry");

View file

@ -5,30 +5,6 @@
void build_info_to_json_object(BUFFER *b);
static void convert_seconds_to_dhms(time_t seconds, char *result, int result_size) {
int days, hours, minutes;
days = (int) (seconds / (24 * 3600));
seconds = (int) (seconds % (24 * 3600));
hours = (int) (seconds / 3600);
seconds %= 3600;
minutes = (int) (seconds / 60);
seconds %= 60;
// Format the result into the provided string buffer
BUFFER *buf = buffer_create(128, NULL);
if (days)
buffer_sprintf(buf,"%d day%s%s", days, days==1 ? "" : "s", hours || minutes ? ", " : "");
if (hours)
buffer_sprintf(buf,"%d hour%s%s", hours, hours==1 ? "" : "s", minutes ? ", " : "");
if (minutes)
buffer_sprintf(buf,"%d minute%s%s", minutes, minutes==1 ? "" : "s", seconds ? ", " : "");
if (seconds)
buffer_sprintf(buf,"%d second%s", (int) seconds, seconds==1 ? "" : "s");
strncpyz(result, buffer_tostring(buf), result_size);
buffer_free(buf);
}
void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now_s, bool info, bool array) {
if(!now_s)
now_s = now_realtime_sec();
@ -117,8 +93,8 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now
buffer_json_add_array_item_object(wb);
buffer_json_member_add_uint64(wb, "tier", tier);
char human_retention[128];
convert_seconds_to_dhms((time_t) group_seconds, human_retention, sizeof(human_retention) - 1);
buffer_json_member_add_string(wb, "point_every", human_retention);
duration_snprintf_time_t(human_retention, sizeof(human_retention), (stime_t)group_seconds);
buffer_json_member_add_string(wb, "granularity", human_retention);
buffer_json_member_add_uint64(wb, "metrics", storage_engine_metrics(eng->seb, localhost->db[tier].si));
buffer_json_member_add_uint64(wb, "samples", storage_engine_samples(eng->seb, localhost->db[tier].si));
@ -136,7 +112,9 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now
buffer_json_member_add_time_t(wb, "to", now_s);
buffer_json_member_add_time_t(wb, "retention", retention);
convert_seconds_to_dhms(retention, human_retention, sizeof(human_retention) - 1);
duration_snprintf_hours(human_retention, sizeof(human_retention),
(int)duration_round_to_resolution(retention, 3600));
buffer_json_member_add_string(wb, "retention_human", human_retention);
if(used || max) { // we have disk space information
@ -148,12 +126,16 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now
time_t actual_retention = MIN(space_retention, time_retention ? time_retention : space_retention);
if (time_retention) {
convert_seconds_to_dhms(time_retention, human_retention, sizeof(human_retention) - 1);
duration_snprintf_hours(human_retention, sizeof(human_retention),
(int)duration_round_to_resolution(time_retention, 3600));
buffer_json_member_add_time_t(wb, "requested_retention", time_retention);
buffer_json_member_add_string(wb, "requested_retention_human", human_retention);
}
convert_seconds_to_dhms(actual_retention, human_retention, sizeof(human_retention) - 1);
duration_snprintf_hours(human_retention, sizeof(human_retention),
(int)duration_round_to_resolution(actual_retention, 3600));
buffer_json_member_add_time_t(wb, "expected_retention", actual_retention);
buffer_json_member_add_string(wb, "expected_retention_human", human_retention);
}
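The two helpers used above come from the new duration parsers; a short sketch of how they are expected to behave (the exact output strings are assumptions):

char buf[128];
duration_snprintf_time_t(buf, sizeof(buf), (stime_t)90);   // e.g. "1m30s"
// round a retention of 26 hours to whole hours before printing it
duration_snprintf_hours(buf, sizeof(buf), (int)duration_round_to_resolution(26 * 3600, 3600));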

View file

@ -1096,7 +1096,7 @@ typedef struct alarm_log {
uint32_t next_alarm_id;
unsigned int count;
unsigned int max;
uint32_t health_log_history; // the health log history in seconds to be kept in db
uint32_t health_log_retention_s; // the health log retention in seconds to be kept in db
ALARM_ENTRY *alarms;
RW_SPINLOCK spinlock;
} ALARM_LOG;
@ -1384,7 +1384,7 @@ void rrddim_index_destroy(RRDSET *st);
extern time_t rrdhost_free_orphan_time_s;
extern time_t rrdhost_free_ephemeral_time_s;
int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest);
int rrd_init(const char *hostname, struct rrdhost_system_info *system_info, bool unittest);
RRDHOST *rrdhost_find_by_hostname(const char *hostname);
RRDHOST *rrdhost_find_by_guid(const char *guid);
@ -1405,9 +1405,9 @@ RRDHOST *rrdhost_find_or_create(
RRD_MEMORY_MODE mode,
unsigned int health_enabled,
unsigned int rrdpush_enabled,
char *rrdpush_destination,
char *rrdpush_api_key,
char *rrdpush_send_charts_matching,
const char *rrdpush_destination,
const char *rrdpush_api_key,
const char *rrdpush_send_charts_matching,
bool rrdpush_enable_replication,
time_t rrdpush_seconds_to_replicate,
time_t rrdpush_replication_step,

View file

@ -232,10 +232,10 @@ void set_host_properties(RRDHOST *host, int update_every, RRD_MEMORY_MODE memory
// RRDHOST - add a host
static void rrdhost_initialize_rrdpush_sender(RRDHOST *host,
unsigned int rrdpush_enabled,
char *rrdpush_destination,
char *rrdpush_api_key,
char *rrdpush_send_charts_matching
unsigned int rrdpush_enabled,
const char *rrdpush_destination,
const char *rrdpush_api_key,
const char *rrdpush_send_charts_matching
) {
if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_INITIALIZED)) return;
@ -341,9 +341,9 @@ static RRDHOST *rrdhost_create(
RRD_MEMORY_MODE memory_mode,
unsigned int health_enabled,
unsigned int rrdpush_enabled,
char *rrdpush_destination,
char *rrdpush_api_key,
char *rrdpush_send_charts_matching,
const char *rrdpush_destination,
const char *rrdpush_api_key,
const char *rrdpush_send_charts_matching,
bool rrdpush_enable_replication,
time_t rrdpush_seconds_to_replicate,
time_t rrdpush_replication_step,
@ -474,7 +474,7 @@ static RRDHOST *rrdhost_create(
if (is_localhost && host->system_info) {
host->system_info->ml_capable = ml_capable();
host->system_info->ml_enabled = ml_enabled(host);
host->system_info->mc_version = enable_metric_correlations ? metric_correlations_version : 0;
host->system_info->mc_version = metric_correlations_version;
}
// ------------------------------------------------------------------------
@ -566,9 +566,9 @@ static void rrdhost_update(RRDHOST *host
, RRD_MEMORY_MODE mode
, unsigned int health_enabled
, unsigned int rrdpush_enabled
, char *rrdpush_destination
, char *rrdpush_api_key
, char *rrdpush_send_charts_matching
, const char *rrdpush_destination
, const char *rrdpush_api_key
, const char *rrdpush_send_charts_matching
, bool rrdpush_enable_replication
, time_t rrdpush_seconds_to_replicate
, time_t rrdpush_replication_step
@ -706,9 +706,9 @@ RRDHOST *rrdhost_find_or_create(
, RRD_MEMORY_MODE mode
, unsigned int health_enabled
, unsigned int rrdpush_enabled
, char *rrdpush_destination
, char *rrdpush_api_key
, char *rrdpush_send_charts_matching
, const char *rrdpush_destination
, const char *rrdpush_api_key
, const char *rrdpush_send_charts_matching
, bool rrdpush_enable_replication
, time_t rrdpush_seconds_to_replicate
, time_t rrdpush_replication_step
@ -862,7 +862,7 @@ RRD_BACKFILL get_dbengine_backfill(RRD_BACKFILL backfill)
#endif
void dbengine_init(char *hostname) {
static void dbengine_init(const char *hostname) {
#ifdef ENABLE_DBENGINE
use_direct_io = config_get_boolean(CONFIG_SECTION_DB, "dbengine use direct io", use_direct_io);
@ -900,10 +900,10 @@ void dbengine_init(char *hostname) {
!config_exists(CONFIG_SECTION_DB, "dbengine tier 2 update every iterations") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 3 update every iterations") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 4 update every iterations") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 1 disk space MB") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 2 disk space MB") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 3 disk space MB") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 4 disk space MB"));
!config_exists(CONFIG_SECTION_DB, "dbengine tier 1 retention size") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 2 retention size") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 3 retention size") &&
!config_exists(CONFIG_SECTION_DB, "dbengine tier 4 retention size"));
default_backfill = get_dbengine_backfill(RRD_BACKFILL_NEW);
char dbengineconfig[200 + 1];
@ -925,11 +925,11 @@ void dbengine_init(char *hostname) {
storage_tiers_grouping_iterations[tier] = grouping_iterations;
}
default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine tier 0 disk space MB", RRDENG_DEFAULT_TIER_DISK_SPACE_MB);
default_multidb_disk_quota_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine tier 0 retention size", RRDENG_DEFAULT_TIER_DISK_SPACE_MB);
if(default_multidb_disk_quota_mb && default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
netdata_log_error("Invalid disk space %d for tier 0 given. Defaulting to %d.", default_multidb_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB);
default_multidb_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB;
config_set_number(CONFIG_SECTION_DB, "dbengine tier 0 disk space MB", default_multidb_disk_quota_mb);
config_set_size_mb(CONFIG_SECTION_DB, "dbengine tier 0 retention size", default_multidb_disk_quota_mb);
}
#ifdef OS_WINDOWS
@ -959,11 +959,11 @@ void dbengine_init(char *hostname) {
}
int disk_space_mb = tier ? RRDENG_DEFAULT_TIER_DISK_SPACE_MB : default_multidb_disk_quota_mb;
snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu disk space MB", tier);
disk_space_mb = config_get_number(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb);
snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention size", tier);
disk_space_mb = config_get_size_mb(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb);
snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention days", tier);
storage_tiers_retention_days[tier] = config_get_number(
snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention time", tier);
storage_tiers_retention_days[tier] = config_get_duration_days(
CONFIG_SECTION_DB, dbengineconfig, new_dbengine_defaults ? storage_tiers_retention_days[tier] : 0);
tiers_init[tier].disk_space_mb = (int) disk_space_mb;
@ -1026,7 +1026,7 @@ void dbengine_init(char *hostname) {
void api_v1_management_init(void);
int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest) {
int rrd_init(const char *hostname, struct rrdhost_system_info *system_info, bool unittest) {
rrdhost_init();
if (unlikely(sql_init_meta_database(DB_CHECK_NONE, system_info ? 0 : 1))) {
@ -1445,6 +1445,11 @@ void rrdhost_set_is_parent_label(void) {
}
}
static bool config_label_cb(void *data __maybe_unused, const char *name, const char *value) {
rrdlabels_add(localhost->rrdlabels, name, value, RRDLABEL_SRC_CONFIG);
return true;
}
static void rrdhost_load_config_labels(void) {
int status = config_load(NULL, 1, CONFIG_SECTION_HOST_LABEL);
if(!status) {
@ -1454,16 +1459,7 @@ static void rrdhost_load_config_labels(void) {
filename);
}
struct section *co = appconfig_get_section(&netdata_config, CONFIG_SECTION_HOST_LABEL);
if(co) {
config_section_wrlock(co);
struct config_option *cv;
for(cv = co->values; cv ; cv = cv->next) {
rrdlabels_add(localhost->rrdlabels, cv->name, cv->value, RRDLABEL_SRC_CONFIG);
cv->flags |= CONFIG_VALUE_USED;
}
config_section_unlock(co);
}
appconfig_foreach_value_in_section(&netdata_config, CONFIG_SECTION_HOST_LABEL, config_label_cb, NULL);
}
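The callback-based traversal replaces the direct walk over the section's option list; a minimal sketch of the same helper applied by another hypothetical caller (print_option_cb is not part of the patch):

static bool print_option_cb(void *data __maybe_unused, const char *name, const char *value) {
    fprintf(stderr, "%s = %s\n", name, value);
    return true; // returning false is assumed to stop the iteration
}

// e.g., after the config has been loaded:
// appconfig_foreach_value_in_section(&netdata_config, CONFIG_SECTION_HOST_LABEL, print_option_cb, NULL);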
static void rrdhost_load_kubernetes_labels(void) {

View file

@ -311,7 +311,7 @@ void sql_health_alarm_log_cleanup(RRDHOST *host)
int param = 0;
SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC));
SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)host->health_log.health_log_history));
SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)host->health_log.health_log_retention_s));
param = 0;
rc = sqlite3_step_monitored(res);

View file

@ -5,11 +5,7 @@
EXPORTING_OPTIONS global_exporting_options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
const char *global_exporting_prefix = "netdata";
struct config exporting_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
struct config exporting_config = APPCONFIG_INITIALIZER;
struct instance *prometheus_exporter_instance = NULL;
@ -32,7 +28,7 @@ static _CONNECTOR_INSTANCE *find_instance(const char *section)
return local_ci;
}
char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
static const char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
{
_CONNECTOR_INSTANCE *local_ci;
@ -243,7 +239,7 @@ struct engine *read_exporting_config()
prometheus_exporter_instance->config.options |= global_exporting_options & EXPORTING_OPTIONS_SOURCE_BITS;
char *data_source = prometheus_config_get("data source", "average");
const char *data_source = prometheus_config_get("data source", "average");
prometheus_exporter_instance->config.options =
exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options);
@ -378,7 +374,7 @@ struct engine *read_exporting_config()
tmp_instance->config.hosts_pattern = simple_pattern_create(
exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT, true);
char *data_source = exporter_get(instance_name, "data source", "average");
const char *data_source = exporter_get(instance_name, "data source", "average");
tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options);
if (EXPORTING_OPTIONS_DATA_SOURCE(tmp_instance->config.options) != EXPORTING_SOURCE_DATA_AS_COLLECTED &&

View file

@ -14,7 +14,7 @@ struct health_plugin_globals health_globals = {
.use_summary_for_notifications = true,
.health_log_entries_max = HEALTH_LOG_ENTRIES_DEFAULT,
.health_log_history = HEALTH_LOG_HISTORY_DEFAULT,
.health_log_retention_s = HEALTH_LOG_RETENTION_DEFAULT,
.default_warn_repeat_every = 0,
.default_crit_repeat_every = 0,
@ -55,17 +55,17 @@ static void health_load_config_defaults(void) {
health_globals.config.use_summary_for_notifications);
health_globals.config.default_warn_repeat_every =
config_get_duration(CONFIG_SECTION_HEALTH, "default repeat warning", "never");
config_get_duration_seconds(CONFIG_SECTION_HEALTH, "default repeat warning", 0);
health_globals.config.default_crit_repeat_every =
config_get_duration(CONFIG_SECTION_HEALTH, "default repeat critical", "never");
config_get_duration_seconds(CONFIG_SECTION_HEALTH, "default repeat critical", 0);
health_globals.config.health_log_entries_max =
config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries",
health_globals.config.health_log_entries_max);
health_globals.config.health_log_history =
config_get_number(CONFIG_SECTION_HEALTH, "health log history", HEALTH_LOG_DEFAULT_HISTORY);
health_globals.config.health_log_retention_s =
config_get_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", HEALTH_LOG_RETENTION_DEFAULT);
snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_primary_plugins_dir);
health_globals.config.default_exec =
@ -76,14 +76,13 @@ static void health_load_config_defaults(void) {
NULL, SIMPLE_PATTERN_EXACT, true);
health_globals.config.run_at_least_every_seconds =
(int)config_get_number(CONFIG_SECTION_HEALTH,
"run at least every seconds",
health_globals.config.run_at_least_every_seconds);
(int)config_get_duration_seconds(CONFIG_SECTION_HEALTH, "run at least every",
health_globals.config.run_at_least_every_seconds);
health_globals.config.postpone_alarms_during_hibernation_for_seconds =
config_get_number(CONFIG_SECTION_HEALTH,
"postpone alarms during hibernation for seconds",
health_globals.config.postpone_alarms_during_hibernation_for_seconds);
config_get_duration_seconds(CONFIG_SECTION_HEALTH,
"postpone alarms during hibernation for",
health_globals.config.postpone_alarms_during_hibernation_for_seconds);
health_globals.config.default_recipient =
string_strdupz("root");
@ -115,27 +114,27 @@ static void health_load_config_defaults(void) {
(long)health_globals.config.health_log_entries_max);
}
if (health_globals.config.health_log_history < HEALTH_LOG_MINIMUM_HISTORY) {
if (health_globals.config.health_log_retention_s < HEALTH_LOG_MINIMUM_HISTORY) {
nd_log(NDLS_DAEMON, NDLP_WARNING,
"Health configuration has invalid health log history %u. Using minimum %d",
health_globals.config.health_log_history, HEALTH_LOG_MINIMUM_HISTORY);
"Health configuration has invalid health log retention %u. Using minimum %d",
health_globals.config.health_log_retention_s, HEALTH_LOG_MINIMUM_HISTORY);
health_globals.config.health_log_history = HEALTH_LOG_MINIMUM_HISTORY;
config_set_number(CONFIG_SECTION_HEALTH, "health log history", health_globals.config.health_log_history);
health_globals.config.health_log_retention_s = HEALTH_LOG_MINIMUM_HISTORY;
config_set_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", health_globals.config.health_log_retention_s);
}
nd_log(NDLS_DAEMON, NDLP_DEBUG,
"Health log history is set to %u seconds (%u days)",
health_globals.config.health_log_history, health_globals.config.health_log_history / 86400);
health_globals.config.health_log_retention_s, health_globals.config.health_log_retention_s / 86400);
}
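A hedged sketch of what these [health] options look like in netdata.conf after the renames; the unit suffixes are assumptions based on the duration parser:

// [health]
//     run at least every = 10s
//     postpone alarms during hibernation for = 1m
//     health log retention = 5d
time_t retention = config_get_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", HEALTH_LOG_RETENTION_DEFAULT);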
inline char *health_user_config_dir(void) {
inline const char *health_user_config_dir(void) {
char buffer[FILENAME_MAX + 1];
snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_user_config_dir);
return config_get(CONFIG_SECTION_DIRECTORIES, "health config", buffer);
}
inline char *health_stock_config_dir(void) {
inline const char *health_stock_config_dir(void) {
char buffer[FILENAME_MAX + 1];
snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
return config_get(CONFIG_SECTION_DIRECTORIES, "stock health config", buffer);

View file

@ -34,8 +34,8 @@ void health_entry_flags_to_json_array(BUFFER *wb, const char *key, HEALTH_ENTRY_
#define HEALTH_LISTEN_BACKLOG 4096
#endif
#ifndef HEALTH_LOG_DEFAULT_HISTORY
#define HEALTH_LOG_DEFAULT_HISTORY 432000
#ifndef HEALTH_LOG_RETENTION_DEFAULT
#define HEALTH_LOG_RETENTION_DEFAULT (5 * 86400)
#endif
#ifndef HEALTH_LOG_MINIMUM_HISTORY
@ -75,8 +75,8 @@ ALARM_ENTRY* health_create_alarm_entry(
void health_alarm_log_add_entry(RRDHOST *host, ALARM_ENTRY *ae);
char *health_user_config_dir(void);
char *health_stock_config_dir(void);
const char *health_user_config_dir(void);
const char *health_stock_config_dir(void);
void health_alarm_log_free(RRDHOST *host);
void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae);

View file

@ -29,14 +29,14 @@ static inline int health_parse_delay(
while(*s && isspace((uint8_t)*s)) *s++ = '\0';
if(!strcasecmp(key, "up")) {
if (!config_parse_duration(value, delay_up_duration)) {
if (!duration_parse_seconds(value, delay_up_duration)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
line, filename, value, key);
}
else given_up = 1;
}
else if(!strcasecmp(key, "down")) {
if (!config_parse_duration(value, delay_down_duration)) {
if (!duration_parse_seconds(value, delay_down_duration)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
line, filename, value, key);
}
@ -51,7 +51,7 @@ static inline int health_parse_delay(
else given_multiplier = 1;
}
else if(!strcasecmp(key, "max")) {
if (!config_parse_duration(value, delay_max_duration)) {
if (!duration_parse_seconds(value, delay_max_duration)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
line, filename, value, key);
}
@ -139,13 +139,13 @@ static inline int health_parse_repeat(
return 1;
}
if(!strcasecmp(key, "warning")) {
if (!config_parse_duration(value, (int*)warn_repeat_every)) {
if (!duration_parse_seconds(value, (int *)warn_repeat_every)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
line, file, value, key);
}
}
else if(!strcasecmp(key, "critical")) {
if (!config_parse_duration(value, (int*)crit_repeat_every)) {
if (!duration_parse_seconds(value, (int *)crit_repeat_every)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword",
line, file, value, key);
}
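duration_parse_seconds() is the drop-in replacement for config_parse_duration() throughout the health parser; a minimal sketch of the expected call pattern (the sample strings are assumptions):

int secs = 0;
if(duration_parse_seconds("10m", &secs))
    ; // secs == 600
if(!duration_parse_seconds("not-a-duration", &secs))
    netdata_log_error("cannot parse duration");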
@ -273,7 +273,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char
while(*s && !isspace((uint8_t)*s)) s++;
while(*s && isspace((uint8_t)*s)) *s++ = '\0';
if(!config_parse_duration(key, &ac->after)) {
if(!duration_parse_seconds(key, &ac->after)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method",
line, filename, key);
return 0;
@ -294,7 +294,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char
while(*s && !isspace((uint8_t)*s)) s++;
while(*s && isspace((uint8_t)*s)) *s++ = '\0';
if (!config_parse_duration(value, &ac->before)) {
if (!duration_parse_seconds(value, &ac->before)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword",
line, filename, value, key);
}
@ -304,7 +304,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char
while(*s && !isspace((uint8_t)*s)) s++;
while(*s && isspace((uint8_t)*s)) *s++ = '\0';
if (!config_parse_duration(value, &ac->update_every)) {
if (!duration_parse_seconds(value, &ac->update_every)) {
netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword",
line, filename, value, key);
}
@ -725,7 +725,7 @@ int health_readfile(const char *filename, void *data __maybe_unused, bool stock_
health_parse_db_lookup(line, filename, value, ac);
}
else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) {
if(!config_parse_duration(value, &ac->update_every))
if(!duration_parse_seconds(value, &ac->update_every))
netdata_log_error(
"Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' "
"cannot parse duration: '%s'.",

View file

@ -130,7 +130,7 @@ static void health_initialize_rrdhost(RRDHOST *host) {
rrdhost_flag_set(host, RRDHOST_FLAG_INITIALIZED_HEALTH);
host->health_log.max = health_globals.config.health_log_entries_max;
host->health_log.health_log_history = health_globals.config.health_log_history;
host->health_log.health_log_retention_s = health_globals.config.health_log_retention_s;
host->health.health_default_exec = string_dup(health_globals.config.default_exec);
host->health.health_default_recipient = string_dup(health_globals.config.default_recipient);
host->health.use_summary_for_notifications = health_globals.config.use_summary_for_notifications;

View file

@ -9,7 +9,7 @@
#define HEALTH_LOG_ENTRIES_MAX 100000U
#define HEALTH_LOG_ENTRIES_MIN 10U
#define HEALTH_LOG_HISTORY_DEFAULT (5 * 86400)
#define HEALTH_LOG_RETENTION_DEFAULT (5 * 86400)
#define HEALTH_CONF_MAX_LINE 4096
@ -76,7 +76,7 @@ struct health_plugin_globals {
bool use_summary_for_notifications;
unsigned int health_log_entries_max;
uint32_t health_log_history; // the health log history in seconds to be kept in db
uint32_t health_log_retention_s; // the health log retention in seconds to be kept in db
STRING *silencers_filename;
STRING *default_exec;

View file

@ -72,7 +72,7 @@ struct aral {
struct {
bool enabled;
const char *filename;
char **cache_dir;
const char **cache_dir;
} mmap;
} config;
@ -709,7 +709,7 @@ size_t aral_element_size(ARAL *ar) {
}
ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size,
struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless) {
struct aral_statistics *stats, const char *filename, const char **cache_dir, bool mmap, bool lockless) {
ARAL *ar = callocz(1, sizeof(ARAL));
ar->config.options = (lockless) ? ARAL_LOCKLESS : 0;
ar->config.requested_element_size = element_size;
@ -1078,7 +1078,7 @@ int aral_stress_test(size_t threads, size_t elements, size_t seconds) {
}
int aral_unittest(size_t elements) {
char *cache_dir = "/tmp/";
const char *cache_dir = "/tmp/";
struct aral_unittest_config auc = {
.single_threaded = true,

View file

@ -28,7 +28,7 @@ struct aral_statistics {
};
ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size,
struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless);
struct aral_statistics *stats, const char *filename, const char **cache_dir, bool mmap, bool lockless);
size_t aral_element_size(ARAL *ar);
size_t aral_overhead(ARAL *ar);
size_t aral_structures(ARAL *ar);

View file

@ -452,7 +452,7 @@ static inline collected_number uptime_from_boottime(void) {
}
static procfile *read_proc_uptime_ff = NULL;
static inline collected_number read_proc_uptime(char *filename) {
static inline collected_number read_proc_uptime(const char *filename) {
if(unlikely(!read_proc_uptime_ff)) {
read_proc_uptime_ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
if(unlikely(!read_proc_uptime_ff)) return 0;
@ -473,7 +473,7 @@ static inline collected_number read_proc_uptime(char *filename) {
return (collected_number)(strtondd(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0);
}
inline collected_number uptime_msec(char *filename){
inline collected_number uptime_msec(const char *filename){
static int use_boottime = -1;
if(unlikely(use_boottime == -1)) {

View file

@ -18,7 +18,12 @@ struct timespec {
typedef uint64_t nsec_t;
typedef uint64_t msec_t;
typedef uint64_t usec_t;
typedef int64_t snsec_t;
typedef int64_t susec_t;
typedef int64_t smsec_t;
typedef int64_t stime_t;
typedef struct heartbeat {
usec_t realtime;
@ -151,7 +156,7 @@ time_t now_sec(clockid_t clk_id);
usec_t now_usec(clockid_t clk_id);
int now_timeval(clockid_t clk_id, struct timeval *tv);
collected_number uptime_msec(char *filename);
collected_number uptime_msec(const char *filename);
extern usec_t clock_monotonic_resolution;
extern usec_t clock_realtime_resolution;

File diff suppressed because it is too large.

View file

@ -103,7 +103,6 @@
#define CONFIG_SECTION_GLOBAL_STATISTICS "global statistics"
#define CONFIG_SECTION_DB "db"
// these are used to limit the configuration names and values lengths
// they are not enforced by config.c functions (they will strdup() all strings, no matter of their length)
#define CONFIG_MAX_NAME 1024
@ -113,94 +112,43 @@
// Config definitions
#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
#define CONFIG_VALUE_LOADED 0x01 // has been loaded from the config
#define CONFIG_VALUE_USED 0x02 // has been accessed from the program
#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value
#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default
struct config_option {
avl_t avl_node; // the index entry of this entry - this has to be first!
uint8_t flags;
uint32_t hash; // a simple hash to speed up searching
// we first compare hashes, and only if the hashes are equal we do string comparisons
char *name;
char *value;
struct config_option *next; // config->mutex protects just this
};
struct section {
avl_t avl_node; // the index entry of this section - this has to be first!
uint32_t hash; // a simple hash to speed up searching
// we first compare hashes, and only if the hashes are equal we do string comparisons
char *name;
struct section *next; // global config_mutex protects just this
struct config_option *values;
avl_tree_lock values_index;
netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates
// readers are protected using the rwlock in avl_tree_lock
};
struct config_section;
struct config {
struct section *first_section;
struct section *last_section; // optimize inserting at the end
netdata_mutex_t mutex;
struct config_section *sections;
SPINLOCK spinlock;
avl_tree_lock index;
};
#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed)
#define CONFIG_BOOLEAN_NO 0 // disabled
#define CONFIG_BOOLEAN_YES 1 // enabled
#ifndef CONFIG_BOOLEAN_AUTO
#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled
#endif
#define APPCONFIG_INITIALIZER (struct config) { \
.sections = NULL, \
.spinlock = NETDATA_SPINLOCK_INITIALIZER, \
.index = { \
.avl_tree = { \
.root = NULL, \
.compar = appconfig_section_compare, \
}, \
.rwlock = AVL_LOCK_INITIALIZER, \
}, \
}
int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name);
void config_section_wrlock(struct section *co);
void config_section_unlock(struct section *co);
char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value);
char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
NETDATA_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
int appconfig_get_boolean_by_section(struct section *co, const char *name, int value);
int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value);
typedef bool (*appconfig_foreach_value_cb_t)(void *data, const char *name, const char *value);
size_t appconfig_foreach_value_in_section(struct config *root, const char *section, appconfig_foreach_value_cb_t cb, void *data);
const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value);
long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
NETDATA_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
// sets a raw value, only if it is not loaded from the config
void appconfig_set_default_raw_value(struct config *root, const char *section, const char *name, const char *value);
int appconfig_exists(struct config *root, const char *section, const char *name);
int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new);
int appconfig_move_everywhere(struct config *root, const char *name_old, const char *name_new);
void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf);
int appconfig_section_compare(void *a, void *b);
void appconfig_section_destroy_non_loaded(struct config *root, const char *section);
void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name);
int config_parse_duration(const char* string, int* result);
struct section *appconfig_get_section(struct config *root, const char *name);
void appconfig_wrlock(struct config *root);
void appconfig_unlock(struct config *root);
int appconfig_test_boolean_value(char *s);
bool appconfig_test_boolean_value(const char *s);
struct connector_instance {
char instance_name[CONFIG_MAX_NAME + 1];
@ -208,13 +156,37 @@ struct connector_instance {
};
typedef struct _connector_instance {
struct section *connector; // actual connector
struct section *instance; // This instance
struct config_section *connector; // actual connector
struct config_section *instance; // This instance
char instance_name[CONFIG_MAX_NAME + 1];
char connector_name[CONFIG_MAX_NAME + 1];
struct _connector_instance *next; // Next instance
} _CONNECTOR_INSTANCE;
_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance);
_CONNECTOR_INSTANCE *add_connector_instance(struct config_section *connector, struct config_section *instance);
#endif /* NETDATA_CONFIG_H */
// ----------------------------------------------------------------------------
// shortcuts for the default netdata configuration
#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section)
#define config_set_default_raw_value(section, name, value) appconfig_set_default_raw_value(&netdata_config, section, name, value)
#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new)
#define netdata_conf_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed, true)
#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section)
#define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name)
bool stream_conf_needs_dbengine(struct config *root);
bool stream_conf_has_uuid_section(struct config *root);
#include "appconfig_api_text.h"
#include "appconfig_api_numbers.h"
#include "appconfig_api_boolean.h"
#include "appconfig_api_sizes.h"
#include "appconfig_api_durations.h"
#endif // NETDATA_CONFIG_H

View file

@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "appconfig_internals.h"
#include "appconfig_api_boolean.h"
bool appconfig_test_boolean_value(const char *s) {
if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on")
|| !strcasecmp(s, "auto") || !strcasecmp(s, "on demand"))
return true;
return false;
}
int appconfig_get_boolean_by_section(struct config_section *sect, const char *name, int value) {
struct config_option *opt = appconfig_get_raw_value_of_option_in_section(
sect, name, (!value) ? "no" : "yes", CONFIG_VALUE_TYPE_BOOLEAN, NULL);
if(!opt) return value;
return appconfig_test_boolean_value(string2str(opt->value));
}
int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value) {
const char *s;
if(value) s = "yes";
else s = "no";
struct config_option *opt = appconfig_get_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN, NULL);
if(!opt) return value;
s = string2str(opt->value);
return appconfig_test_boolean_value(s);
}
int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value) {
const char *s;
if(value == CONFIG_BOOLEAN_AUTO)
s = "auto";
else if(value == CONFIG_BOOLEAN_NO)
s = "no";
else
s = "yes";
struct config_option *opt = appconfig_get_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN_ONDEMAND, NULL);
if(!opt) return value;
s = string2str(opt->value);
if(!strcmp(s, "yes") || !strcmp(s, "true") || !strcmp(s, "on"))
return CONFIG_BOOLEAN_YES;
else if(!strcmp(s, "no") || !strcmp(s, "false") || !strcmp(s, "off"))
return CONFIG_BOOLEAN_NO;
else if(!strcmp(s, "auto") || !strcmp(s, "on demand"))
return CONFIG_BOOLEAN_AUTO;
return value;
}
int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value) {
const char *s;
if(value) s = "yes";
else s = "no";
appconfig_set_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN);
return value;
}

View file

@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_APPCONFIG_API_BOOLEAN_H
#define NETDATA_APPCONFIG_API_BOOLEAN_H
#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed)
#define CONFIG_BOOLEAN_NO 0 // disabled
#define CONFIG_BOOLEAN_YES 1 // enabled
#ifndef CONFIG_BOOLEAN_AUTO
#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled
#endif
int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
#endif //NETDATA_APPCONFIG_API_BOOLEAN_H

View file

@ -0,0 +1,134 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "appconfig_internals.h"
#include "appconfig_api_durations.h"
static STRING *reformat_duration_seconds(STRING *value) {
int result = 0;
if(!duration_parse_seconds(string2str(value), &result))
return value;
char buf[128];
if(duration_snprintf_time_t(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
string_freez(value);
return string_strdupz(buf);
}
return value;
}
time_t appconfig_get_duration_seconds(struct config *root, const char *section, const char *name, time_t default_value) {
char default_str[128];
duration_snprintf_time_t(default_str, sizeof(default_str), default_value);
struct config_option *opt = appconfig_get_raw_value(
root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_SECS, reformat_duration_seconds);
if(!opt)
return default_value;
const char *s = string2str(opt->value);
int result = 0;
if(!duration_parse_seconds(s, &result)) {
appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_SECS);
netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s);
return default_value;
}
return ABS(result);
}
time_t appconfig_set_duration_seconds(struct config *root, const char *section, const char *name, time_t value) {
char str[128];
duration_snprintf_time_t(str, sizeof(str), value);
appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_SECS);
return value;
}
static STRING *reformat_duration_ms(STRING *value) {
int64_t result = 0;
if(!duration_parse_msec_t(string2str(value), &result))
return value;
char buf[128];
if(duration_snprintf_msec_t(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
string_freez(value);
return string_strdupz(buf);
}
return value;
}
msec_t appconfig_get_duration_ms(struct config *root, const char *section, const char *name, msec_t default_value) {
char default_str[128];
duration_snprintf_msec_t(default_str, sizeof(default_str), default_value);
struct config_option *opt = appconfig_get_raw_value(
root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_MS, reformat_duration_ms);
if(!opt)
return default_value;
const char *s = string2str(opt->value);
smsec_t result = 0;
if(!duration_parse_msec_t(s, &result)) {
appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_MS);
netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s);
return default_value;
}
return ABS(result);
}
msec_t appconfig_set_duration_ms(struct config *root, const char *section, const char *name, msec_t value) {
char str[128];
duration_snprintf_msec_t(str, sizeof(str), (smsec_t)value);
appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_MS);
return value;
}
static STRING *reformat_duration_days(STRING *value) {
int64_t result = 0;
if(!duration_parse_days(string2str(value), &result))
return value;
char buf[128];
if(duration_snprintf_days(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
string_freez(value);
return string_strdupz(buf);
}
return value;
}
unsigned appconfig_get_duration_days(struct config *root, const char *section, const char *name, unsigned default_value) {
char default_str[128];
duration_snprintf_days(default_str, sizeof(default_str), (int)default_value);
struct config_option *opt = appconfig_get_raw_value(
root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS, reformat_duration_days);
if(!opt)
return default_value;
const char *s = string2str(opt->value);
int64_t result = 0;
if(!duration_parse_days(s, &result)) {
appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS);
netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s);
return default_value;
}
return (unsigned)ABS(result);
}
unsigned appconfig_set_duration_days(struct config *root, const char *section, const char *name, unsigned value) {
char str[128];
duration_snprintf_days(str, sizeof(str), value);
appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS);
return value;
}
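Taken together, the getter/reformatter pairs above mean a value written as a raw number of seconds can be normalized the next time the configuration is generated; a sketch under that assumption:

// netdata.conf: [db] "cleanup obsolete charts after = 3600"
time_t t = appconfig_get_duration_seconds(&netdata_config, "db", "cleanup obsolete charts after", 3600);
// t == 3600; reformat_duration_seconds() is assumed to rewrite the stored
// string as "1h", so a regenerated netdata.conf shows the normalized form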

View file

@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_APPCONFIG_API_DURATIONS_H
#define NETDATA_APPCONFIG_API_DURATIONS_H
msec_t appconfig_get_duration_ms(struct config *root, const char *section, const char *name, msec_t default_value);
msec_t appconfig_set_duration_ms(struct config *root, const char *section, const char *name, msec_t value);
#define config_get_duration_ms(section, name, value) appconfig_get_duration_ms(&netdata_config, section, name, value)
#define config_set_duration_ms(section, name, value) appconfig_set_duration_ms(&netdata_config, section, name, value)
time_t appconfig_get_duration_seconds(struct config *root, const char *section, const char *name, time_t default_value);
time_t appconfig_set_duration_seconds(struct config *root, const char *section, const char *name, time_t value);
#define config_get_duration_seconds(section, name, value) appconfig_get_duration_seconds(&netdata_config, section, name, value)
#define config_set_duration_seconds(section, name, value) appconfig_set_duration_seconds(&netdata_config, section, name, value)
unsigned appconfig_get_duration_days(struct config *root, const char *section, const char *name, unsigned default_value);
unsigned appconfig_set_duration_days(struct config *root, const char *section, const char *name, unsigned value);
#define config_get_duration_days(section, name, value) appconfig_get_duration_days(&netdata_config, section, name, value)
#define config_set_duration_days(section, name, value) appconfig_set_duration_days(&netdata_config, section, name, value)
#endif //NETDATA_APPCONFIG_API_DURATIONS_H

View file

@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "appconfig_internals.h"
#include "appconfig_api_numbers.h"
long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value) {
char buffer[100];
sprintf(buffer, "%lld", value);
struct config_option *opt = appconfig_get_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_INTEGER, NULL);
if(!opt) return value;
const char *s = string2str(opt->value);
return strtoll(s, NULL, 0);
}
NETDATA_DOUBLE appconfig_get_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) {
char buffer[100];
sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value);
struct config_option *opt = appconfig_get_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_DOUBLE, NULL);
if(!opt) return value;
const char *s = string2str(opt->value);
return str2ndd(s, NULL);
}
long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value) {
char buffer[100];
sprintf(buffer, "%lld", value);
appconfig_set_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_INTEGER);
return value;
}
NETDATA_DOUBLE appconfig_set_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) {
char buffer[100];
sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value);
appconfig_set_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_DOUBLE);
return value;
}

View file

@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_APPCONFIG_API_NUMBERS_H
#define NETDATA_APPCONFIG_API_NUMBERS_H
long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
NETDATA_DOUBLE appconfig_get_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
NETDATA_DOUBLE appconfig_set_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value);
#define config_get_double(section, name, value) appconfig_get_double(&netdata_config, section, name, value)
#define config_set_double(section, name, value) appconfig_set_double(&netdata_config, section, name, value)
#endif //NETDATA_APPCONFIG_API_NUMBERS_H

src/libnetdata/config/appconfig_api_sizes.c
@@ -0,0 +1,86 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "appconfig_internals.h"
#include "appconfig_api_sizes.h"
static STRING *reformat_size_bytes(STRING *value) {
uint64_t result = 0;
if(!size_parse_bytes(string2str(value), &result))
return value;
char buf[128];
if(size_snprintf_bytes(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
string_freez(value);
return string_strdupz(buf);
}
return value;
}
uint64_t appconfig_get_size_bytes(struct config *root, const char *section, const char *name, uint64_t default_value) {
char default_str[128];
size_snprintf_bytes(default_str, sizeof(default_str), default_value);
struct config_option *opt =
appconfig_get_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES, reformat_size_bytes);
if(!opt)
return default_value;
const char *s = string2str(opt->value);
uint64_t result = 0;
if(!size_parse_bytes(s, &result)) {
appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES);
netdata_log_error("config option '[%s].%s = %s' is configured with an invalid size", section, name, s);
return default_value;
}
return result;
}
uint64_t appconfig_set_size_bytes(struct config *root, const char *section, const char *name, uint64_t value) {
char str[128];
size_snprintf_bytes(str, sizeof(str), value);
appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES);
return value;
}
static STRING *reformat_size_mb(STRING *value) {
uint64_t result = 0;
if(!size_parse_mb(string2str(value), &result))
return value;
char buf[128];
if(size_snprintf_mb(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) {
string_freez(value);
return string_strdupz(buf);
}
return value;
}
uint64_t appconfig_get_size_mb(struct config *root, const char *section, const char *name, uint64_t default_value) {
char default_str[128];
size_snprintf_mb(default_str, sizeof(default_str), default_value);
struct config_option *opt =
appconfig_get_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_MB, reformat_size_mb);
if(!opt)
return default_value;
const char *s = string2str(opt->value);
uint64_t result = 0;
if(!size_parse_mb(s, &result)) {
appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_MB);
netdata_log_error("config option '[%s].%s = %s' is configured with an invalid size", section, name, s);
return default_value;
}
return result;
}
uint64_t appconfig_set_size_mb(struct config *root, const char *section, const char *name, uint64_t value) {
char str[128];
size_snprintf_mb(str, sizeof(str), value);
appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_SIZE_IN_MB);
return value;
}
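A minimal sketch of how the size accessors might be consumed (hypothetical option names and defaults; the config_* macros come from the companion header below):

uint64_t cache_mb = config_get_size_mb("db", "dbengine page cache size", 32);
uint64_t buffer_bytes = config_get_size_bytes("stream", "buffer size", 1024ULL * 1024);
config_set_size_mb("db", "dbengine page cache size", cache_mb);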

src/libnetdata/config/appconfig_api_sizes.h
@@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_APPCONFIG_API_SIZES_H
#define NETDATA_APPCONFIG_API_SIZES_H
uint64_t appconfig_get_size_bytes(struct config *root, const char *section, const char *name, uint64_t default_value);
uint64_t appconfig_set_size_bytes(struct config *root, const char *section, const char *name, uint64_t value);
#define config_get_size_bytes(section, name, value) appconfig_get_size_bytes(&netdata_config, section, name, value)
#define config_set_size_bytes(section, name, value) appconfig_set_size_bytes(&netdata_config, section, name, value)
uint64_t appconfig_get_size_mb(struct config *root, const char *section, const char *name, uint64_t default_value);
uint64_t appconfig_set_size_mb(struct config *root, const char *section, const char *name, uint64_t value);
#define config_get_size_mb(section, name, value) appconfig_get_size_mb(&netdata_config, section, name, value)
#define config_set_size_mb(section, name, value) appconfig_set_size_mb(&netdata_config, section, name, value)
#endif //NETDATA_APPCONFIG_API_SIZES_H

src/libnetdata/config/appconfig_api_text.c
@@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "appconfig_internals.h"
#include "appconfig_api_text.h"
const char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value) {
struct config_option *opt = appconfig_get_raw_value(root, section, name, default_value, CONFIG_VALUE_TYPE_TEXT, NULL);
if(!opt)
return default_value;
return string2str(opt->value);
}
const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value) {
struct config_option *opt = appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_TEXT);
return string2str(opt->value);
}
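Finally, a small sketch for the plain-text accessors above (hypothetical section and option, using the global netdata_config referenced by the macros earlier in this diff):

const char *cache_dir = appconfig_get(&netdata_config, "directories", "cache", "/var/cache/netdata");
appconfig_set(&netdata_config, "directories", "cache", cache_dir);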

Some files were not shown because too many files have changed in this diff.