Mirror of https://github.com/netdata/netdata.git, synced 2025-04-16 18:37:50 +00:00

modularize the query api (#4443)

* modularized exporters
* modularized API data queries
* optimized queries
* modularized API data reduction methods
* modularized api queries
* added new directories in makefiles
* added median db query
* moved all RRDR_GROUPING related to query.h
* added stddev query
* operational median and stddev
* working simple exponential smoothing
* too complex to do it right
* fixed ses
* fixed ses
* rewrote query engine
* fix double-exponential-smoothing
* cleanup
* fixed bug identified by @vlvkobal at rrdset_first_slot()
* enable freeipmi on systems with libipmimonitoring; #4440

parent c09afb49a9, commit 09e89e937a
79 changed files with 3244 additions and 2142 deletions
Changed paths:

CMakeLists.txt
Makefile.am
collectors/cgroups.plugin
configure.ac
database
health
libnetdata/statistical
web/api: Makefile.am, README.md, badges, exporters, netdata-swagger.json, netdata-swagger.yaml, queries, rrd2json.c, rrd2json.h, web_api_v1.c, web_api_v1.h
web/gui
web/server
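For reference, the grouping methods added under web/api/queries/ (average, incremental_sum, max, min, sum, median, stddev, ses) each expose the same init/reset/free/add/flush hooks shown later in this diff in average.c. Below is a minimal, self-contained sketch of that callback pattern, using a toy struct in place of netdata's RRDR and plain doubles in place of calculated_number; it is an illustration, not the commit's code:

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Toy stand-ins for netdata's RRDR / calculated_number types. */
typedef double calculated_number;
struct toy_rrdr { void *grouping_data; };

struct grouping_average { calculated_number sum; size_t count; };

static void *grouping_init(struct toy_rrdr *r)  { (void)r; return calloc(1, sizeof(struct grouping_average)); }
static void  grouping_reset(struct toy_rrdr *r) { struct grouping_average *g = r->grouping_data; g->sum = 0; g->count = 0; }
static void  grouping_free(struct toy_rrdr *r)  { free(r->grouping_data); }
static void  grouping_add(struct toy_rrdr *r, calculated_number v) {
    if(!isnan(v)) { struct grouping_average *g = r->grouping_data; g->sum += v; g->count++; }
}
static calculated_number grouping_flush(struct toy_rrdr *r) {
    struct grouping_average *g = r->grouping_data;
    calculated_number v = g->count ? g->sum / g->count : NAN;   /* NAN marks an empty group */
    g->sum = 0; g->count = 0;
    return v;
}

int main(void) {
    struct toy_rrdr r = { .grouping_data = NULL };
    r.grouping_data = grouping_init(&r);
    grouping_reset(&r);
    const calculated_number points[] = { 1, 2, 3, NAN, 5 };
    for(size_t i = 0; i < 5; i++) grouping_add(&r, points[i]);
    printf("group value = %f\n", grouping_flush(&r));           /* 2.75: the NAN is skipped */
    grouping_free(&r);
    return 0;
}
```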
CMakeLists.txt

@ -124,16 +124,15 @@ IF(LINUX)
        # ${MNL_INCLUDE_DIRS}
ENDIF(LINUX)

# -----------------------------------------------------------------------------
# Detect libmnl

IF(LINUX)
        pkg_check_modules(NFACCT libnetfilter_acct)
        # later we use:
        # ${NFACCT_LIBRARIES}
        # ${NFACCT_CFLAGS_OTHER}
        # ${NFACCT_INCLUDE_DIRS}
ENDIF(LINUX)

pkg_check_modules(NFACCT libnetfilter_acct)
# later we use:
# ${NFACCT_LIBRARIES}
# ${NFACCT_CFLAGS_OTHER}
# ${NFACCT_INCLUDE_DIRS}

# -----------------------------------------------------------------------------
@ -370,6 +369,30 @@ set(API_PLUGIN_FILES
        web/api/web_api_v1.h
        web/api/badges/web_buffer_svg.c
        web/api/badges/web_buffer_svg.h
        web/api/exporters/allmetrics.c
        web/api/exporters/allmetrics.h
        web/api/exporters/shell/allmetrics_shell.c
        web/api/exporters/shell/allmetrics_shell.h
        web/api/queries/rrdr.c
        web/api/queries/rrdr.h
        web/api/queries/query.c
        web/api/queries/query.h
        web/api/queries/average/average.c
        web/api/queries/average/average.h
        web/api/queries/incremental_sum/incremental_sum.c
        web/api/queries/incremental_sum/incremental_sum.h
        web/api/queries/max/max.c
        web/api/queries/max/max.h
        web/api/queries/min/min.c
        web/api/queries/min/min.h
        web/api/queries/sum/sum.c
        web/api/queries/sum/sum.h
        web/api/queries/median/median.c
        web/api/queries/median/median.h
        web/api/queries/stddev/stddev.c
        web/api/queries/stddev/stddev.h
        web/api/queries/ses/ses.c
        web/api/queries/ses/ses.h
        )

set(STREAMING_PLUGIN_FILES
@ -470,11 +493,6 @@ IF(LINUX)
    SET(ENABLE_PLUGIN_CGROUP_NETWORK True)
    SET(ENABLE_PLUGIN_APPS True)
    IF(IPMI_LIBRARIES)
        SET(ENABLE_PLUGIN_FREEIPMI True)
    ELSE()
        SET(ENABLE_PLUGIN_FREEIPMI False)
    ENDIF()

ELSEIF(FREEBSD)
    add_executable(netdata config.h ${NETDATA_FILES} ${FREEBSD_PLUGIN_FILES})

@ -483,7 +501,6 @@ ELSEIF(FREEBSD)
    target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
    SET(ENABLE_PLUGIN_CGROUP_NETWORK False)
    SET(ENABLE_PLUGIN_APPS True)
    SET(ENABLE_PLUGIN_FREEIPMI False)

ELSEIF(MACOS)
    add_executable(netdata config.h ${NETDATA_FILES} ${MACOS_PLUGIN_FILES})

@ -492,10 +509,15 @@ ELSEIF(MACOS)
    target_compile_options(netdata PUBLIC ${NETDATA_COMMON_CFLAGS})
    SET(ENABLE_PLUGIN_CGROUP_NETWORK False)
    SET(ENABLE_PLUGIN_APPS False)
    SET(ENABLE_PLUGIN_FREEIPMI False)

ENDIF()

IF(IPMI_LIBRARIES)
    SET(ENABLE_PLUGIN_FREEIPMI True)
ELSE()
    SET(ENABLE_PLUGIN_FREEIPMI False)
ENDIF()

# -----------------------------------------------------------------------------
# apps.plugin
Makefile.am (28 changes)

@ -283,12 +283,36 @@ RRD_PLUGIN_FILES = \
    $(NULL)

API_PLUGIN_FILES = \
    web/api/badges/web_buffer_svg.c \
    web/api/badges/web_buffer_svg.h \
    web/api/exporters/allmetrics.c \
    web/api/exporters/allmetrics.h \
    web/api/exporters/shell/allmetrics_shell.c \
    web/api/exporters/shell/allmetrics_shell.h \
    web/api/queries/average/average.c \
    web/api/queries/average/average.h \
    web/api/queries/incremental_sum/incremental_sum.c \
    web/api/queries/incremental_sum/incremental_sum.h \
    web/api/queries/max/max.c \
    web/api/queries/max/max.h \
    web/api/queries/median/median.c \
    web/api/queries/median/median.h \
    web/api/queries/min/min.c \
    web/api/queries/min/min.h \
    web/api/queries/query.c \
    web/api/queries/query.h \
    web/api/queries/rrdr.c \
    web/api/queries/rrdr.h \
    web/api/queries/ses/ses.c \
    web/api/queries/ses/ses.h \
    web/api/queries/stddev/stddev.c \
    web/api/queries/stddev/stddev.h \
    web/api/queries/sum/sum.c \
    web/api/queries/sum/sum.h \
    web/api/rrd2json.c \
    web/api/rrd2json.h \
    web/api/web_api_v1.c \
    web/api/web_api_v1.h \
    web/api/badges/web_buffer_svg.c \
    web/api/badges/web_buffer_svg.h \
    $(NULL)

STREAMING_PLUGIN_FILES = \
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "../../daemon/common.h"
#include "libnetdata/libnetdata.h"

#ifdef HAVE_SETNS
#ifndef _GNU_SOURCE
configure.ac (12 changes)

@ -604,6 +604,18 @@ AC_CONFIG_FILES([
    web/Makefile
    web/api/Makefile
    web/api/badges/Makefile
    web/api/exporters/Makefile
    web/api/exporters/shell/Makefile
    web/api/exporters/prometheus/Makefile
    web/api/queries/Makefile
    web/api/queries/average/Makefile
    web/api/queries/incremental_sum/Makefile
    web/api/queries/max/Makefile
    web/api/queries/median/Makefile
    web/api/queries/min/Makefile
    web/api/queries/ses/Makefile
    web/api/queries/stddev/Makefile
    web/api/queries/sum/Makefile
    web/gui/Makefile
    web/server/Makefile
    web/server/single/Makefile
@ -15,7 +15,7 @@ typedef struct rrdcalctemplate RRDCALCTEMPLATE;
typedef struct alarm_entry ALARM_ENTRY;

#include "../daemon/common.h"

#include "web/api/queries/query.h"
#include "rrdvar.h"
#include "rrdsetvar.h"
#include "rrddimvar.h"
@ -749,28 +749,84 @@ extern void rrdset_isnot_obsolete(RRDSET *st);
#define rrdset_first_entry_t(st) ((time_t)(rrdset_last_entry_t(st) - rrdset_duration(st)))

// get the last slot updated in the round robin database
#define rrdset_last_slot(st) ((unsigned long)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1))
#define rrdset_last_slot(st) ((size_t)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1))

// get the first / oldest slot updated in the round robin database
#define rrdset_first_slot(st) ((unsigned long)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? ((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) ))
// #define rrdset_first_slot(st) ((size_t)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? ((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) ))

// return the slot that has the oldest value

static inline size_t rrdset_first_slot(RRDSET *st) {
    if(st->counter >= (size_t)st->entries) {
        // the database has been rotated at least once
        // the oldest entry is the one that will be next
        // overwritten by data collection
        return (size_t)st->current_entry;
    }

    // we do not have rotated the db yet
    // so 0 is the first entry
    return 0;
}

// get the slot of the round robin database, for the given timestamp (t)
// it always returns a valid slot, although may not be for the time requested if the time is outside the round robin database
#define rrdset_time2slot(st, t) ( \
        ( (time_t)(t) >= rrdset_last_entry_t(st)) ? ( rrdset_last_slot(st) ) : \
        ( ((time_t)(t) <= rrdset_first_entry_t(st)) ? rrdset_first_slot(st) : \
        ( (rrdset_last_slot(st) >= (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) ? \
        (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) : \
        (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) + (unsigned long)(st)->entries ) \
        )))
static inline size_t rrdset_time2slot(RRDSET *st, time_t t) {
    size_t ret = 0;

    if(t >= rrdset_last_entry_t(st)) {
        // the requested time is after the last entry we have
        ret = rrdset_last_slot(st);
    }
    else {
        if(t <= rrdset_first_entry_t(st)) {
            // the requested time is before the first entry we have
            ret = rrdset_first_slot(st);
        }
        else {
            if(rrdset_last_slot(st) >= ((rrdset_last_entry_t(st) - t) / (size_t)(st->update_every)))
                ret = rrdset_last_slot(st) - ((rrdset_last_entry_t(st) - t) / (size_t)(st->update_every));
            else
                ret = rrdset_last_slot(st) - ((rrdset_last_entry_t(st) - t) / (size_t)(st->update_every)) + (unsigned long)st->entries;
        }
    }

    if(unlikely(ret >= (size_t)st->entries)) {
        error("INTERNAL ERROR: rrdset_time2slot() on %s returns values outside entries", st->name);
        ret = (size_t)(st->entries - 1);
    }

    return ret;
}

// get the timestamp of a specific slot in the round robin database
#define rrdset_slot2time(st, slot) ( rrdset_last_entry_t(st) - \
        ((unsigned long)(st)->update_every * ( \
        ( (unsigned long)(slot) > rrdset_last_slot(st)) ? \
        ( (rrdset_last_slot(st) - (unsigned long)(slot) + (unsigned long)(st)->entries) ) : \
        ( (rrdset_last_slot(st) - (unsigned long)(slot)) )) \
        ))
static inline time_t rrdset_slot2time(RRDSET *st, size_t slot) {
    time_t ret;

    if(slot >= (size_t)st->entries) {
        error("INTERNAL ERROR: caller of rrdset_slot2time() gives invalid slot %zu", slot);
        slot = (size_t)st->entries - 1;
    }

    if(slot > rrdset_last_slot(st)) {
        ret = rrdset_last_entry_t(st) - (size_t)st->update_every * (rrdset_last_slot(st) - slot + (size_t)st->entries);
    }
    else {
        ret = rrdset_last_entry_t(st) - (size_t)st->update_every;
    }

    if(unlikely(ret < rrdset_first_entry_t(st))) {
        error("INTERNAL ERROR: rrdset_slot2time() on %s returns time too far in the past", st->name);
        ret = rrdset_first_entry_t(st);
    }

    if(unlikely(ret > rrdset_last_entry_t(st))) {
        error("INTERNAL ERROR: rrdset_slot2time() on %s returns time into the future", st->name);
        ret = rrdset_last_entry_t(st);
    }

    return ret;
}

// ----------------------------------------------------------------------------
// RRD DIMENSION functions
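The new inline helpers above index the per-chart round-robin database: rrdset_last_slot() is the most recently written slot, and rrdset_first_slot() is the oldest one, which is slot 0 until the ring has wrapped and the next-to-be-overwritten slot afterwards. A self-contained sketch of that arithmetic, using a toy struct instead of RRDSET:

```c
#include <stdio.h>

/* Simplified stand-in for the few RRDSET fields the slot helpers use. */
struct ring {
    long   entries;        /* size of the round robin database        */
    long   current_entry;  /* slot that will be written next          */
    size_t counter;        /* total number of values collected so far */
};

/* Mirrors the logic of the new rrdset_last_slot()/rrdset_first_slot(). */
static size_t ring_last_slot(struct ring *st) {
    return (size_t)((st->current_entry == 0) ? st->entries - 1 : st->current_entry - 1);
}
static size_t ring_first_slot(struct ring *st) {
    if(st->counter >= (size_t)st->entries)
        return (size_t)st->current_entry;   /* rotated: oldest is the next slot to be overwritten */
    return 0;                               /* not rotated yet: slot 0 is the oldest              */
}

int main(void) {
    struct ring st = { .entries = 5, .current_entry = 3, .counter = 3 };            /* not rotated */
    printf("first=%zu last=%zu\n", ring_first_slot(&st), ring_last_slot(&st));      /* 0, 2 */

    st.counter = 12;                                                                /* rotated */
    printf("first=%zu last=%zu\n", ring_first_slot(&st), ring_last_slot(&st));      /* 3, 2 */
    return 0;
}
```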
@ -363,7 +363,7 @@ inline RRDCALC *rrdcalc_create(RRDHOST *host, RRDCALCTEMPLATE *rt, const char *c
    (rc->recipient)?rc->recipient:"DEFAULT",
    rc->green,
    rc->red,
    rc->group,
    (int)rc->group,
    rc->after,
    rc->before,
    rc->options,
@ -54,7 +54,7 @@ struct rrdcalc {
    // database lookup settings

    char *dimensions;           // the chart dimensions
    int group;                  // grouping method: average, max, etc.
    RRDR_GROUPING group;        // grouping method: average, max, etc.
    int before;                 // ending point in time-series
    int after;                  // starting point in time-series
    uint32_t options;           // calculation options
@ -35,7 +35,7 @@ struct rrdcalctemplate {
    // database lookup settings

    char *dimensions;           // the chart dimensions
    int group;                  // grouping method: average, max, etc.
    RRDR_GROUPING group;        // grouping method: average, max, etc.
    int before;                 // ending point in time-series
    int after;                  // starting point in time-series
    uint32_t options;           // calculation options
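With this change the alarm's lookup method is stored as a typed RRDR_GROUPING value instead of a plain int (the casts to int above only keep the existing log format strings working). A hypothetical sketch of such an enum follows; member names other than RRDR_GROUPING_UNDEFINED and RRDR_GROUPING_AVERAGE are assumptions here, since the real definition lives in web/api/queries/query.h, which this capture does not show:

```c
/* Hypothetical sketch only; the real definition is in web/api/queries/query.h. */
typedef enum rrdr_grouping {
    RRDR_GROUPING_UNDEFINED = 0,
    RRDR_GROUPING_AVERAGE,
    RRDR_GROUPING_MIN,
    RRDR_GROUPING_MAX,
    RRDR_GROUPING_SUM,
    RRDR_GROUPING_INCREMENTAL_SUM,
    RRDR_GROUPING_MEDIAN,
    RRDR_GROUPING_STDDEV,
    RRDR_GROUPING_SES,
} RRDR_GROUPING;
```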
@ -3,7 +3,7 @@
#ifndef NETDATA_RRDVAR_H
#define NETDATA_RRDVAR_H 1

#include "rrd.h"
#include "libnetdata/libnetdata.h"

extern int rrdvar_compare(void *a, void *b);
@ -45,6 +45,12 @@ struct rrdvar {
    time_t last_updated;
};

#define RRDVAR_MAX_LENGTH 1024

extern int rrdvar_fix_name(char *variable);

#include "rrd.h"

extern RRDVAR *rrdvar_custom_host_variable_create(RRDHOST *host, const char *name);
extern void rrdvar_custom_host_variable_set(RRDHOST *host, RRDVAR *rv, calculated_number value);
extern int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR *rv, void *data), void *data);
@ -54,10 +60,6 @@ extern int rrdvar_callback_for_all_host_variables(RRDHOST *host, int (*callback

extern calculated_number rrdvar2number(RRDVAR *rv);

#define RRDVAR_MAX_LENGTH 1024

extern int rrdvar_fix_name(char *variable);

extern RRDVAR *rrdvar_create_and_index(const char *scope, avl_tree_lock *tree, const char *name, RRDVAR_TYPE type, RRDVAR_OPTIONS options, void *value);
extern void rrdvar_free(RRDHOST *host, avl_tree_lock *tree, RRDVAR *rv);
@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#define NETDATA_HEALTH_INTERNALS
#include "health.h"

unsigned int default_health_enabled = 1;
@ -101,7 +101,7 @@ template: 1m_received_packets_rate
      os: linux freebsd
   hosts: *
families: *
  lookup: average -1m of received
  lookup: average -1m unaligned of received
   units: packets
   every: 10s
    info: the average number of packets received during the last minute

@ -111,7 +111,7 @@ template: 10s_received_packets_storm
      os: linux freebsd
   hosts: *
families: *
  lookup: average -10s of received
  lookup: average -10s unaligned of received
    calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
   every: 10s
   units: %
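The alarm lookups above gain the `unaligned` keyword, so the health engine queries the raw time window instead of one aligned to the grouping step. A rough, hypothetical sketch of mapping that keyword to an option bit; the real parsing is done by health_parse_db_lookup() and health_parse_options(), and the flag value below is a placeholder, not the real RRDR_OPTION_NOT_ALIGNED:

```c
#include <stdio.h>
#include <string.h>

/* Placeholder flag value for illustration only. */
#define OPTION_NOT_ALIGNED 0x00008000

static unsigned int parse_lookup_options(const char *s) {
    unsigned int options = 0;
    if(strstr(s, "unaligned")) options |= OPTION_NOT_ALIGNED;   /* request an unaligned query */
    return options;
}

int main(void) {
    printf("0x%08x\n", parse_lookup_options("average -1m unaligned of received"));
    return 0;
}
```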
@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#define NETDATA_HEALTH_INTERNALS
#include "health.h"

#define HEALTH_CONF_MAX_LINE 4096
@ -54,7 +53,7 @@ static inline int rrdcalc_add_alarm_from_config(RRDHOST *host, RRDCALC *rc) {
    (rc->recipient)?rc->recipient:"DEFAULT",
    rc->green,
    rc->red,
    rc->group,
    (int)rc->group,
    rc->after,
    rc->before,
    rc->options,

@ -108,7 +107,7 @@ static inline int rrdcalctemplate_add_template_from_config(RRDHOST *host, RRDCAL
    (rt->recipient)?rt->recipient:"DEFAULT",
    rt->green,
    rt->red,
    rt->group,
    (int)rt->group,
    rt->after,
    rt->before,
    rt->options,
@ -288,7 +287,7 @@ static inline uint32_t health_parse_options(const char *s) {

static inline int health_parse_db_lookup(
    size_t line, const char *filename, char *string,
    int *group_method, int *after, int *before, int *every,
    RRDR_GROUPING *group_method, int *after, int *before, int *every,
    uint32_t *options, char **dimensions
) {
    debug(D_HEALTH, "Health configuration parsing database lookup %zu@%s: %s", line, filename, string);

@ -312,7 +311,7 @@ static inline int health_parse_db_lookup(
        return 0;
    }

    if((*group_method = web_client_api_request_v1_data_group(key, -1)) == -1) {
    if((*group_method = web_client_api_request_v1_data_group(key, RRDR_GROUPING_UNDEFINED)) == RRDR_GROUPING_UNDEFINED) {
        error("Health configuration at line %zu of file '%s': invalid group method '%s'",
            line, filename, key);
        return 0;
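The parser now asks web_client_api_request_v1_data_group() for a typed grouping value and uses RRDR_GROUPING_UNDEFINED as the failure sentinel instead of -1. A small self-contained sketch of that name-to-method lookup with a default sentinel; enum values are illustrative only, not the real query.h definitions:

```c
#include <stdio.h>
#include <string.h>

typedef enum { G_UNDEFINED = 0, G_AVERAGE, G_MIN, G_MAX, G_MEDIAN, G_STDDEV, G_SUM, G_INCREMENTAL_SUM } GROUPING;

/* Look up a method by name; return the caller-supplied default when unknown. */
static GROUPING data_group(const char *name, GROUPING def) {
    static const struct { const char *name; GROUPING g; } methods[] = {
        { "average", G_AVERAGE }, { "min", G_MIN }, { "max", G_MAX },
        { "median", G_MEDIAN }, { "stddev", G_STDDEV }, { "sum", G_SUM },
        { "incremental-sum", G_INCREMENTAL_SUM }, { NULL, G_UNDEFINED }
    };
    for(int i = 0; methods[i].name; i++)
        if(!strcmp(name, methods[i].name)) return methods[i].g;
    return def;
}

int main(void) {
    GROUPING g = data_group("stddev", G_UNDEFINED);
    if(g == G_UNDEFINED) fprintf(stderr, "invalid group method\n");
    else printf("group method id: %d\n", g);
    return 0;
}
```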
@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#define NETDATA_HEALTH_INTERNALS
#include "health.h"

static inline void health_string2json(BUFFER *wb, const char *prefix, const char *label, const char *value, const char *suffix) {
@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#define NETDATA_HEALTH_INTERNALS
#include "health.h"

// ----------------------------------------------------------------------------
@ -2,38 +2,35 @@
|
|||
|
||||
#include "../libnetdata.h"
|
||||
|
||||
LONG_DOUBLE default_single_exponential_smoothing_alpha = 0.1;
|
||||
|
||||
void log_series_to_stderr(LONG_DOUBLE *series, size_t entries, calculated_number result, const char *msg) {
|
||||
const LONG_DOUBLE *value, *end = &series[entries];
|
||||
|
||||
fprintf(stderr, "%s of %zu entries [ ", msg, entries);
|
||||
for(value = series; value < end ;value++) {
|
||||
if(value != series) fprintf(stderr, ", ");
|
||||
fprintf(stderr, "%" LONG_DOUBLE_MODIFIER, *value);
|
||||
}
|
||||
fprintf(stderr, " ] results in " CALCULATED_NUMBER_FORMAT "\n", result);
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
inline LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count) {
|
||||
if(unlikely(entries == 0)) {
|
||||
if(likely(count))
|
||||
*count = 0;
|
||||
|
||||
return NAN;
|
||||
}
|
||||
|
||||
if(unlikely(entries == 1)) {
|
||||
if(likely(count))
|
||||
*count = (isnan(series[0])?0:1);
|
||||
|
||||
return series[0];
|
||||
}
|
||||
|
||||
size_t i, c = 0;
|
||||
const LONG_DOUBLE *value, *end = &series[entries];
|
||||
LONG_DOUBLE sum = 0;
|
||||
size_t c = 0;
|
||||
|
||||
for(i = 0; i < entries ; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
c++;
|
||||
sum += value;
|
||||
for(value = series; value < end ; value++) {
|
||||
if(isnormal(*value)) {
|
||||
sum += *value;
|
||||
c++;
|
||||
}
|
||||
}
|
||||
|
||||
if(likely(count))
|
||||
*count = c;
|
||||
|
||||
if(unlikely(c == 0))
|
||||
return NAN;
|
||||
if(unlikely(!c)) sum = NAN;
|
||||
if(likely(count)) *count = c;
|
||||
|
||||
return sum;
|
||||
}
|
||||
|
@ -46,9 +43,7 @@ inline LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries) {
|
|||
size_t count = 0;
|
||||
LONG_DOUBLE sum = sum_and_count(series, entries, &count);
|
||||
|
||||
if(unlikely(count == 0))
|
||||
return NAN;
|
||||
|
||||
if(unlikely(!count)) return NAN;
|
||||
return sum / (LONG_DOUBLE)count;
|
||||
}
|
||||
|
||||
|
@ -67,7 +62,7 @@ LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t per
|
|||
|
||||
for(i = 0, count = 0; i < entries; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
if(unlikely(!isnormal(value))) continue;
|
||||
|
||||
if(unlikely(count < period)) {
|
||||
sum += value;
|
||||
|
@ -118,33 +113,25 @@ inline LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries) {
|
|||
}
|
||||
|
||||
LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries) {
|
||||
if(unlikely(entries == 0))
|
||||
return NAN;
|
||||
if(unlikely(entries == 0)) return NAN;
|
||||
if(unlikely(entries == 1)) return series[0];
|
||||
if(unlikely(entries == 2)) return (series[0] + series[1]) / 2;
|
||||
|
||||
if(unlikely(entries == 1))
|
||||
return series[0];
|
||||
|
||||
if(unlikely(entries == 2))
|
||||
return (series[0] + series[1]) / 2;
|
||||
|
||||
LONG_DOUBLE avg;
|
||||
LONG_DOUBLE average;
|
||||
if(entries % 2 == 0) {
|
||||
size_t m = entries / 2;
|
||||
avg = (series[m] + series[m + 1]) / 2;
|
||||
average = (series[m] + series[m + 1]) / 2;
|
||||
}
|
||||
else {
|
||||
avg = series[entries / 2];
|
||||
average = series[entries / 2];
|
||||
}
|
||||
|
||||
return avg;
|
||||
return average;
|
||||
}
|
||||
|
||||
LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries) {
|
||||
if(unlikely(entries == 0))
|
||||
return NAN;
|
||||
|
||||
if(unlikely(entries == 1))
|
||||
return series[0];
|
||||
if(unlikely(entries == 0)) return NAN;
|
||||
if(unlikely(entries == 1)) return series[0];
|
||||
|
||||
if(unlikely(entries == 2))
|
||||
return (series[0] + series[1]) / 2;
|
||||
|
@ -186,7 +173,7 @@ LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) {
|
|||
|
||||
for(i = 0; i < entries ; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
if(unlikely(!isnormal(value))) continue;
|
||||
|
||||
average += ( value - average ) * 0.1f; // rough running average.
|
||||
median += copysignl( average * 0.01, value - median );
|
||||
|
@ -198,47 +185,36 @@ LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) {
|
|||
// --------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) {
|
||||
if(unlikely(entries < 1))
|
||||
return NAN;
|
||||
if(unlikely(entries == 0)) return NAN;
|
||||
if(unlikely(entries == 1)) return series[0];
|
||||
|
||||
if(unlikely(entries == 1))
|
||||
return series[0];
|
||||
const LONG_DOUBLE *value, *end = &series[entries];
|
||||
size_t count;
|
||||
LONG_DOUBLE sum;
|
||||
|
||||
size_t i, count = 0;
|
||||
LONG_DOUBLE sum = 0;
|
||||
|
||||
for(i = 0; i < entries ; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
|
||||
count++;
|
||||
sum += value;
|
||||
for(count = 0, sum = 0, value = series ; value < end ;value++) {
|
||||
if(likely(isnormal(*value))) {
|
||||
count++;
|
||||
sum += *value;
|
||||
}
|
||||
}
|
||||
|
||||
if(unlikely(count == 0))
|
||||
return NAN;
|
||||
|
||||
if(unlikely(count == 1))
|
||||
return sum;
|
||||
if(unlikely(count == 0)) return NAN;
|
||||
if(unlikely(count == 1)) return sum;
|
||||
|
||||
LONG_DOUBLE average = sum / (LONG_DOUBLE)count;
|
||||
|
||||
for(i = 0, count = 0, sum = 0; i < entries ; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
|
||||
count++;
|
||||
sum += powl(value - average, 2);
|
||||
for(count = 0, sum = 0, value = series ; value < end ;value++) {
|
||||
if(isnormal(*value)) {
|
||||
count++;
|
||||
sum += powl(*value - average, 2);
|
||||
}
|
||||
}
|
||||
|
||||
if(unlikely(count == 0))
|
||||
return NAN;
|
||||
|
||||
if(unlikely(count == 1))
|
||||
return average;
|
||||
|
||||
LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count - 1); // remove -1 to have a population stddev
|
||||
if(unlikely(count == 0)) return NAN;
|
||||
if(unlikely(count == 1)) return average;
|
||||
|
||||
LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count); // remove -1 from count to have a population stddev
|
||||
LONG_DOUBLE stddev = sqrtl(variance);
|
||||
return stddev;
|
||||
}
|
||||
|
@ -246,21 +222,36 @@ LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) {
|
|||
// --------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) {
|
||||
size_t i, count = 0;
|
||||
LONG_DOUBLE level = 0, sum = 0;
|
||||
if(unlikely(entries == 0))
|
||||
return NAN;
|
||||
|
||||
if(unlikely(isnan(alpha)))
|
||||
alpha = 0.3;
|
||||
alpha = default_single_exponential_smoothing_alpha;
|
||||
|
||||
for(i = 0; i < entries ; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
count++;
|
||||
const LONG_DOUBLE *value = series, *end = &series[entries];
|
||||
LONG_DOUBLE level = (1.0 - alpha) * (*value);
|
||||
|
||||
sum += value;
|
||||
for(value++ ; value < end; value++) {
|
||||
if(likely(isnormal(*value)))
|
||||
level = alpha * (*value) + (1.0 - alpha) * level;
|
||||
}
|
||||
|
||||
LONG_DOUBLE last_level = level;
|
||||
level = alpha * value + (1.0 - alpha) * last_level;
|
||||
return level;
|
||||
}
|
||||
|
||||
LONG_DOUBLE single_exponential_smoothing_reverse(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) {
|
||||
if(unlikely(entries == 0))
|
||||
return NAN;
|
||||
|
||||
if(unlikely(isnan(alpha)))
|
||||
alpha = default_single_exponential_smoothing_alpha;
|
||||
|
||||
const LONG_DOUBLE *value = &series[entries -1];
|
||||
LONG_DOUBLE level = (1.0 - alpha) * (*value);
|
||||
|
||||
for(value++ ; value >= series; value--) {
|
||||
if(likely(isnormal(*value)))
|
||||
level = alpha * (*value) + (1.0 - alpha) * level;
|
||||
}
|
||||
|
||||
return level;
|
||||
|
@ -270,8 +261,10 @@ LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entri
|
|||
|
||||
// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/
|
||||
LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast) {
|
||||
size_t i, count = 0;
|
||||
LONG_DOUBLE level = series[0], trend, sum;
|
||||
if(unlikely(entries == 0))
|
||||
return NAN;
|
||||
|
||||
LONG_DOUBLE level, trend;
|
||||
|
||||
if(unlikely(isnan(alpha)))
|
||||
alpha = 0.3;
|
||||
|
@ -279,24 +272,22 @@ LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entri
|
|||
if(unlikely(isnan(beta)))
|
||||
beta = 0.05;
|
||||
|
||||
level = series[0];
|
||||
|
||||
if(likely(entries > 1))
|
||||
trend = series[1] - series[0];
|
||||
else
|
||||
trend = 0;
|
||||
|
||||
sum = series[0];
|
||||
const LONG_DOUBLE *value = series;
|
||||
for(value++ ; value >= series; value--) {
|
||||
if(likely(isnormal(*value))) {
|
||||
|
||||
for(i = 1; i < entries ; i++) {
|
||||
LONG_DOUBLE value = series[i];
|
||||
if(unlikely(isnan(value) || isinf(value))) continue;
|
||||
count++;
|
||||
LONG_DOUBLE last_level = level;
|
||||
level = alpha * *value + (1.0 - alpha) * (level + trend);
|
||||
trend = beta * (level - last_level) + (1.0 - beta) * trend;
|
||||
|
||||
sum += value;
|
||||
|
||||
LONG_DOUBLE last_level = level;
|
||||
|
||||
level = alpha * value + (1.0 - alpha) * (level + trend);
|
||||
trend = beta * (level - last_level) + (1.0 - beta) * trend;
|
||||
}
|
||||
}
|
||||
|
||||
if(forecast)
|
||||
|
|
@ -5,6 +5,12 @@

#include "../libnetdata.h"

#ifndef isnormal
#define isnormal(x) (fpclassify(x) == FP_NORMAL)
#endif

extern void log_series_to_stderr(LONG_DOUBLE *series, size_t entries, calculated_number result, const char *msg);

extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries);
extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period);
extern LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries);

@ -12,6 +18,7 @@ extern LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size
extern LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries);
extern LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries);
extern LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha);
extern LONG_DOUBLE single_exponential_smoothing_reverse(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha);
extern LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast);
extern LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast);
extern LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count);
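The header above now also exports median, standard_deviation and the two single-exponential-smoothing variants used by the new query methods. A self-contained illustration of what these reductions compute, using plain doubles instead of LONG_DOUBLE; netdata's implementations additionally skip NAN/INF values, and the smoothing recurrence is level = alpha*x + (1-alpha)*level:

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static int cmp(const void *a, const void *b) {
    double d = *(const double *)a - *(const double *)b;
    return (d > 0) - (d < 0);
}

static double median_of(double *s, size_t n) {
    qsort(s, n, sizeof(double), cmp);
    return (n % 2) ? s[n / 2] : (s[n / 2 - 1] + s[n / 2]) / 2.0;
}

static double stddev_of(const double *s, size_t n) {     /* sample stddev, divisor n-1 */
    double sum = 0, sq = 0;
    for(size_t i = 0; i < n; i++) sum += s[i];
    double avg = sum / n;
    for(size_t i = 0; i < n; i++) sq += (s[i] - avg) * (s[i] - avg);
    return sqrt(sq / (n - 1));
}

static double ses_of(const double *s, size_t n, double alpha) {
    double level = (1.0 - alpha) * s[0];                  /* seed from the first value   */
    for(size_t i = 1; i < n; i++) level = alpha * s[i] + (1.0 - alpha) * level;
    return level;
}

int main(void) {
    double series[] = { 4, 1, 7, 2, 6 };
    double sorted[] = { 4, 1, 7, 2, 6 };
    printf("median=%g stddev=%g ses=%g\n",
           median_of(sorted, 5), stddev_of(series, 5), ses_of(series, 5, 0.1));
    return 0;
}
```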
web/api/Makefile.am

@ -5,8 +5,14 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

SUBDIR = \
    badges \
    queries \
    $(NULL)

dist_noinst_DATA = \
    README.md \
    $(NULL)

dist_web_DATA = \
    netdata-swagger.yaml \
    netdata-swagger.json \
    $(NULL)
web/api/README.md

@ -16,68 +16,3 @@ and this [multi chart, jsfiddle example](https://jsfiddle.net/ktsaou/L5y2eqp2/):



This hunk removes the "## using the api from shell scripts" section (the /api/v1/allmetrics shell examples) from web/api/README.md; the same text is added as the new file web/api/exporters/shell/README.md, shown later in this diff.
|
@ -887,3 +887,256 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch
|
|||
label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped,
|
||||
label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped);
|
||||
}
|
||||
|
||||
int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) {
|
||||
int ret = 400;
|
||||
buffer_flush(w->response.data);
|
||||
|
||||
BUFFER *dimensions = NULL;
|
||||
|
||||
const char *chart = NULL
|
||||
, *before_str = NULL
|
||||
, *after_str = NULL
|
||||
, *points_str = NULL
|
||||
, *multiply_str = NULL
|
||||
, *divide_str = NULL
|
||||
, *label = NULL
|
||||
, *units = NULL
|
||||
, *label_color = NULL
|
||||
, *value_color = NULL
|
||||
, *refresh_str = NULL
|
||||
, *precision_str = NULL
|
||||
, *scale_str = NULL
|
||||
, *alarm = NULL;
|
||||
|
||||
int group = RRDR_GROUPING_AVERAGE;
|
||||
uint32_t options = 0x00000000;
|
||||
|
||||
while(url) {
|
||||
char *value = mystrsep(&url, "/?&");
|
||||
if(!value || !*value) continue;
|
||||
|
||||
char *name = mystrsep(&value, "=");
|
||||
if(!name || !*name) continue;
|
||||
if(!value || !*value) continue;
|
||||
|
||||
debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value);
|
||||
|
||||
// name and value are now the parameters
|
||||
// they are not null and not empty
|
||||
|
||||
if(!strcmp(name, "chart")) chart = value;
|
||||
else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
|
||||
if(!dimensions)
|
||||
dimensions = buffer_create(100);
|
||||
|
||||
buffer_strcat(dimensions, "|");
|
||||
buffer_strcat(dimensions, value);
|
||||
}
|
||||
else if(!strcmp(name, "after")) after_str = value;
|
||||
else if(!strcmp(name, "before")) before_str = value;
|
||||
else if(!strcmp(name, "points")) points_str = value;
|
||||
else if(!strcmp(name, "group")) {
|
||||
group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
|
||||
}
|
||||
else if(!strcmp(name, "options")) {
|
||||
options |= web_client_api_request_v1_data_options(value);
|
||||
}
|
||||
else if(!strcmp(name, "label")) label = value;
|
||||
else if(!strcmp(name, "units")) units = value;
|
||||
else if(!strcmp(name, "label_color")) label_color = value;
|
||||
else if(!strcmp(name, "value_color")) value_color = value;
|
||||
else if(!strcmp(name, "multiply")) multiply_str = value;
|
||||
else if(!strcmp(name, "divide")) divide_str = value;
|
||||
else if(!strcmp(name, "refresh")) refresh_str = value;
|
||||
else if(!strcmp(name, "precision")) precision_str = value;
|
||||
else if(!strcmp(name, "scale")) scale_str = value;
|
||||
else if(!strcmp(name, "alarm")) alarm = value;
|
||||
}
|
||||
|
||||
if(!chart || !*chart) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
buffer_sprintf(w->response.data, "No chart id is given at the request.");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
int scale = (scale_str && *scale_str)?str2i(scale_str):100;
|
||||
|
||||
RRDSET *st = rrdset_find(host, chart);
|
||||
if(!st) st = rrdset_find_byname(host, chart);
|
||||
if(!st) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0);
|
||||
ret = 200;
|
||||
goto cleanup;
|
||||
}
|
||||
st->last_accessed_time = now_realtime_sec();
|
||||
|
||||
RRDCALC *rc = NULL;
|
||||
if(alarm) {
|
||||
rc = rrdcalc_find(st, alarm);
|
||||
if (!rc) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0);
|
||||
ret = 200;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1;
|
||||
long long divide = (divide_str && *divide_str )?str2l(divide_str):1;
|
||||
long long before = (before_str && *before_str )?str2l(before_str):0;
|
||||
long long after = (after_str && *after_str )?str2l(after_str):-st->update_every;
|
||||
int points = (points_str && *points_str )?str2i(points_str):1;
|
||||
int precision = (precision_str && *precision_str)?str2i(precision_str):-1;
|
||||
|
||||
if(!multiply) multiply = 1;
|
||||
if(!divide) divide = 1;
|
||||
|
||||
int refresh = 0;
|
||||
if(refresh_str && *refresh_str) {
|
||||
if(!strcmp(refresh_str, "auto")) {
|
||||
if(rc) refresh = rc->update_every;
|
||||
else if(options & RRDR_OPTION_NOT_ALIGNED)
|
||||
refresh = st->update_every;
|
||||
else {
|
||||
refresh = (int)(before - after);
|
||||
if(refresh < 0) refresh = -refresh;
|
||||
}
|
||||
}
|
||||
else {
|
||||
refresh = str2i(refresh_str);
|
||||
if(refresh < 0) refresh = -refresh;
|
||||
}
|
||||
}
|
||||
|
||||
if(!label) {
|
||||
if(alarm) {
|
||||
char *s = (char *)alarm;
|
||||
while(*s) {
|
||||
if(*s == '_') *s = ' ';
|
||||
s++;
|
||||
}
|
||||
label = alarm;
|
||||
}
|
||||
else if(dimensions) {
|
||||
const char *dim = buffer_tostring(dimensions);
|
||||
if(*dim == '|') dim++;
|
||||
label = dim;
|
||||
}
|
||||
else
|
||||
label = st->name;
|
||||
}
|
||||
if(!units) {
|
||||
if(alarm) {
|
||||
if(rc->units)
|
||||
units = rc->units;
|
||||
else
|
||||
units = "";
|
||||
}
|
||||
else if(options & RRDR_OPTION_PERCENTAGE)
|
||||
units = "%";
|
||||
else
|
||||
units = st->units;
|
||||
}
|
||||
|
||||
debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'"
|
||||
, w->id
|
||||
, chart
|
||||
, alarm?alarm:""
|
||||
, (dimensions)?buffer_tostring(dimensions):""
|
||||
, after
|
||||
, before
|
||||
, points
|
||||
, group
|
||||
, options
|
||||
);
|
||||
|
||||
if(rc) {
|
||||
if (refresh > 0) {
|
||||
buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
|
||||
w->response.data->expires = now_realtime_sec() + refresh;
|
||||
}
|
||||
else buffer_no_cacheable(w->response.data);
|
||||
|
||||
if(!value_color) {
|
||||
switch(rc->status) {
|
||||
case RRDCALC_STATUS_CRITICAL:
|
||||
value_color = "red";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_WARNING:
|
||||
value_color = "orange";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_CLEAR:
|
||||
value_color = "brightgreen";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_UNDEFINED:
|
||||
value_color = "lightgrey";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_UNINITIALIZED:
|
||||
value_color = "#000";
|
||||
break;
|
||||
|
||||
default:
|
||||
value_color = "grey";
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
buffer_svg(w->response.data,
|
||||
label,
|
||||
(isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide,
|
||||
units,
|
||||
label_color,
|
||||
value_color,
|
||||
precision,
|
||||
scale,
|
||||
options
|
||||
);
|
||||
ret = 200;
|
||||
}
|
||||
else {
|
||||
time_t latest_timestamp = 0;
|
||||
int value_is_null = 1;
|
||||
calculated_number n = NAN;
|
||||
ret = 500;
|
||||
|
||||
// if the collected value is too old, don't calculate its value
|
||||
if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above)))
|
||||
ret = rrdset2value_api_v1(st, w->response.data, &n, (dimensions) ? buffer_tostring(dimensions) : NULL
|
||||
, points, after, before, group, 0, options, NULL, &latest_timestamp, &value_is_null);
|
||||
|
||||
// if the value cannot be calculated, show empty badge
|
||||
if (ret != 200) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
value_is_null = 1;
|
||||
n = 0;
|
||||
ret = 200;
|
||||
}
|
||||
else if (refresh > 0) {
|
||||
buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
|
||||
w->response.data->expires = now_realtime_sec() + refresh;
|
||||
}
|
||||
else buffer_no_cacheable(w->response.data);
|
||||
|
||||
// render the badge
|
||||
buffer_svg(w->response.data,
|
||||
label,
|
||||
(value_is_null)?NAN:(n * multiply / divide),
|
||||
units,
|
||||
label_color,
|
||||
value_color,
|
||||
precision,
|
||||
scale,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
cleanup:
|
||||
buffer_free(dimensions);
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -3,9 +3,14 @@
#ifndef NETDATA_WEB_BUFFER_SVG_H
#define NETDATA_WEB_BUFFER_SVG_H 1

#include "web/api/web_api_v1.h"
#include "libnetdata/libnetdata.h"
#include "web/server/web_client.h"

extern void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options);
extern char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision);

extern int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url);

#include "web/api/web_api_v1.h"

#endif /* NETDATA_WEB_BUFFER_SVG_H */
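buffer_svg() declared above renders one badge into a BUFFER. A sketch of a call, assuming it is compiled inside the netdata tree where BUFFER, buffer_create() and buffer_free() are available; the NULL colors and -1 precision follow the defaults used by the badge handler in this diff:

```c
/* Sketch only: depends on netdata's internal headers (see web_buffer_svg.h). */
#include "web/api/badges/web_buffer_svg.h"

static void render_example_badge(void) {
    BUFFER *wb = buffer_create(1024);

    buffer_svg(wb,
               "disk used",      /* label                                  */
               72.5,             /* value (calculated_number)              */
               "%",              /* units                                  */
               NULL,             /* label_color: NULL uses the default     */
               NULL,             /* value_color: NULL uses the default     */
               -1,               /* precision: -1 = automatic              */
               100,              /* scale: 100 = normal size               */
               0);               /* options bitmap                         */

    /* buffer_tostring(wb) now holds the SVG document. */
    buffer_free(wb);
}
```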
web/api/exporters/Makefile.am (new file, 8 lines)

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)
web/api/exporters/README.md (new empty file)
web/api/exporters/allmetrics.c (new file, 113 lines)

@ -0,0 +1,113 @@
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "allmetrics.h"
|
||||
|
||||
struct prometheus_output_options {
|
||||
char *name;
|
||||
PROMETHEUS_OUTPUT_OPTIONS flag;
|
||||
} prometheus_output_flags_root[] = {
|
||||
{ "help", PROMETHEUS_OUTPUT_HELP },
|
||||
{ "types", PROMETHEUS_OUTPUT_TYPES },
|
||||
{ "names", PROMETHEUS_OUTPUT_NAMES },
|
||||
{ "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS },
|
||||
{ "variables", PROMETHEUS_OUTPUT_VARIABLES },
|
||||
|
||||
// terminator
|
||||
{ NULL, PROMETHEUS_OUTPUT_NONE },
|
||||
};
|
||||
|
||||
inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) {
|
||||
int format = ALLMETRICS_SHELL;
|
||||
const char *prometheus_server = w->client_ip;
|
||||
uint32_t prometheus_backend_options = global_backend_options;
|
||||
PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = PROMETHEUS_OUTPUT_TIMESTAMPS | ((global_backend_options & BACKEND_OPTION_SEND_NAMES)?PROMETHEUS_OUTPUT_NAMES:0);
|
||||
const char *prometheus_prefix = global_backend_prefix;
|
||||
|
||||
while(url) {
|
||||
char *value = mystrsep(&url, "?&");
|
||||
if (!value || !*value) continue;
|
||||
|
||||
char *name = mystrsep(&value, "=");
|
||||
if(!name || !*name) continue;
|
||||
if(!value || !*value) continue;
|
||||
|
||||
if(!strcmp(name, "format")) {
|
||||
if(!strcmp(value, ALLMETRICS_FORMAT_SHELL))
|
||||
format = ALLMETRICS_SHELL;
|
||||
else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS))
|
||||
format = ALLMETRICS_PROMETHEUS;
|
||||
else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS))
|
||||
format = ALLMETRICS_PROMETHEUS_ALL_HOSTS;
|
||||
else if(!strcmp(value, ALLMETRICS_FORMAT_JSON))
|
||||
format = ALLMETRICS_JSON;
|
||||
else
|
||||
format = 0;
|
||||
}
|
||||
else if(!strcmp(name, "server")) {
|
||||
prometheus_server = value;
|
||||
}
|
||||
else if(!strcmp(name, "prefix")) {
|
||||
prometheus_prefix = value;
|
||||
}
|
||||
else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) {
|
||||
prometheus_backend_options = backend_parse_data_source(value, prometheus_backend_options);
|
||||
}
|
||||
else {
|
||||
int i;
|
||||
for(i = 0; prometheus_output_flags_root[i].name ; i++) {
|
||||
if(!strcmp(name, prometheus_output_flags_root[i].name)) {
|
||||
if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true"))
|
||||
prometheus_output_options |= prometheus_output_flags_root[i].flag;
|
||||
else
|
||||
prometheus_output_options &= ~prometheus_output_flags_root[i].flag;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buffer_flush(w->response.data);
|
||||
buffer_no_cacheable(w->response.data);
|
||||
|
||||
switch(format) {
|
||||
case ALLMETRICS_JSON:
|
||||
w->response.data->contenttype = CT_APPLICATION_JSON;
|
||||
rrd_stats_api_v1_charts_allmetrics_json(host, w->response.data);
|
||||
return 200;
|
||||
|
||||
case ALLMETRICS_SHELL:
|
||||
w->response.data->contenttype = CT_TEXT_PLAIN;
|
||||
rrd_stats_api_v1_charts_allmetrics_shell(host, w->response.data);
|
||||
return 200;
|
||||
|
||||
case ALLMETRICS_PROMETHEUS:
|
||||
w->response.data->contenttype = CT_PROMETHEUS;
|
||||
rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
|
||||
host
|
||||
, w->response.data
|
||||
, prometheus_server
|
||||
, prometheus_prefix
|
||||
, prometheus_backend_options
|
||||
, prometheus_output_options
|
||||
);
|
||||
return 200;
|
||||
|
||||
case ALLMETRICS_PROMETHEUS_ALL_HOSTS:
|
||||
w->response.data->contenttype = CT_PROMETHEUS;
|
||||
rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
|
||||
host
|
||||
, w->response.data
|
||||
, prometheus_server
|
||||
, prometheus_prefix
|
||||
, prometheus_backend_options
|
||||
, prometheus_output_options
|
||||
);
|
||||
return 200;
|
||||
|
||||
default:
|
||||
w->response.data->contenttype = CT_TEXT_PLAIN;
|
||||
buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
|
||||
return 400;
|
||||
}
|
||||
}
|
web/api/exporters/allmetrics.h (new file, 11 lines)

@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_ALLMETRICS_H
#define NETDATA_API_ALLMETRICS_H

#include "../rrd2json.h"
#include "shell/allmetrics_shell.h"

extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);

#endif //NETDATA_API_ALLMETRICS_H
web/api/exporters/prometheus/Makefile.am (new file, 8 lines)

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)
web/api/exporters/prometheus/README.md (new file, 3 lines)

@ -0,0 +1,3 @@
# prometheus exporter

The prometheus exporter for netdata is located at the [backends section for prometheus](../../../../backends/prometheus).
web/api/exporters/shell/Makefile.am (new file, 8 lines)

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)
web/api/exporters/shell/README.md (new file, 64 lines)

@ -0,0 +1,64 @@
# shell exporter

Shell scripts can now query netdata:

```sh
eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
```

after this command, all the netdata metrics are exposed to shell. Check:

```sh
# source the metrics
eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"

# let's see if there are variables exposed by netdata for system.cpu
set | grep "^NETDATA_SYSTEM_CPU"

NETDATA_SYSTEM_CPU_GUEST=0
NETDATA_SYSTEM_CPU_GUEST_NICE=0
NETDATA_SYSTEM_CPU_IDLE=95
NETDATA_SYSTEM_CPU_IOWAIT=0
NETDATA_SYSTEM_CPU_IRQ=0
NETDATA_SYSTEM_CPU_NICE=0
NETDATA_SYSTEM_CPU_SOFTIRQ=0
NETDATA_SYSTEM_CPU_STEAL=0
NETDATA_SYSTEM_CPU_SYSTEM=1
NETDATA_SYSTEM_CPU_USER=4
NETDATA_SYSTEM_CPU_VISIBLETOTAL=5

# let's see the total cpu utilization of the system
echo ${NETDATA_SYSTEM_CPU_VISIBLETOTAL}
5

# what about alarms?
set | grep "^NETDATA_ALARM_SYSTEM_SWAP_"
NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_STATUS=CRITICAL
NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_VALUE=53
NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS=CLEAR
NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_VALUE=51

# let's get the current status of the alarm 'ram in swap'
echo ${NETDATA_ALARM_SYSTEM_SWAP_RAM_IN_SWAP_STATUS}
CRITICAL

# is it fast?
time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null

real 0m0,070s
user 0m0,000s
sys 0m0,007s

# it is...
# 0.07 seconds for curl to be loaded, connect to netdata and fetch the response back...
```

The `_VISIBLETOTAL` variable sums up all the dimensions of each chart.

The format of the variables is:

```sh
NETDATA_${chart_id^^}_${dimension_id^^}="${value}"
```

The value is rounded to the closest integer, since shell script cannot process decimal numbers.
web/api/exporters/shell/allmetrics_shell.c (new file, 159 lines)

@ -0,0 +1,159 @@
|
|||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "allmetrics_shell.h"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// BASH
|
||||
// /api/v1/allmetrics?format=bash
|
||||
|
||||
static inline size_t shell_name_copy(char *d, const char *s, size_t usable) {
|
||||
size_t n;
|
||||
|
||||
for(n = 0; *s && n < usable ; d++, s++, n++) {
|
||||
register char c = *s;
|
||||
|
||||
if(unlikely(!isalnum(c))) *d = '_';
|
||||
else *d = (char)toupper(c);
|
||||
}
|
||||
*d = '\0';
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
#define SHELL_ELEMENT_MAX 100
|
||||
|
||||
void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb) {
|
||||
rrdhost_rdlock(host);
|
||||
|
||||
// for each chart
|
||||
RRDSET *st;
|
||||
rrdset_foreach_read(st, host) {
|
||||
calculated_number total = 0.0;
|
||||
char chart[SHELL_ELEMENT_MAX + 1];
|
||||
shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX);
|
||||
|
||||
buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", st->id, st->name);
|
||||
if(rrdset_is_available_for_viewers(st)) {
|
||||
rrdset_rdlock(st);
|
||||
|
||||
// for each dimension
|
||||
RRDDIM *rd;
|
||||
rrddim_foreach_read(rd, st) {
|
||||
if(rd->collections_counter) {
|
||||
char dimension[SHELL_ELEMENT_MAX + 1];
|
||||
shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX);
|
||||
|
||||
calculated_number n = rd->last_stored_value;
|
||||
|
||||
if(isnan(n) || isinf(n))
|
||||
buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units);
|
||||
else {
|
||||
if(rd->multiplier < 0 || rd->divisor < 0) n = -n;
|
||||
n = calculated_number_round(n);
|
||||
if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n;
|
||||
buffer_sprintf(wb, "NETDATA_%s_%s=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
total = calculated_number_round(total);
|
||||
buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, total, st->units);
|
||||
rrdset_unlock(st);
|
||||
}
|
||||
}
|
||||
|
||||
buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n");
|
||||
|
||||
RRDCALC *rc;
|
||||
for(rc = host->alarms; rc ;rc = rc->next) {
|
||||
if(!rc->rrdset) continue;
|
||||
|
||||
char chart[SHELL_ELEMENT_MAX + 1];
|
||||
shell_name_copy(chart, rc->rrdset->name?rc->rrdset->name:rc->rrdset->id, SHELL_ELEMENT_MAX);
|
||||
|
||||
char alarm[SHELL_ELEMENT_MAX + 1];
|
||||
shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX);
|
||||
|
||||
calculated_number n = rc->value;
|
||||
|
||||
if(isnan(n) || isinf(n))
|
||||
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units);
|
||||
else {
|
||||
n = calculated_number_round(n);
|
||||
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units);
|
||||
}
|
||||
|
||||
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status));
|
||||
}
|
||||
|
||||
rrdhost_unlock(host);
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb) {
|
||||
rrdhost_rdlock(host);
|
||||
|
||||
buffer_strcat(wb, "{");
|
||||
|
||||
size_t chart_counter = 0;
|
||||
size_t dimension_counter = 0;
|
||||
|
||||
// for each chart
|
||||
RRDSET *st;
|
||||
rrdset_foreach_read(st, host) {
|
||||
if(rrdset_is_available_for_viewers(st)) {
|
||||
rrdset_rdlock(st);
|
||||
|
||||
buffer_sprintf(wb, "%s\n"
|
||||
"\t\"%s\": {\n"
|
||||
"\t\t\"name\":\"%s\",\n"
|
||||
"\t\t\"context\":\"%s\",\n"
|
||||
"\t\t\"units\":\"%s\",\n"
|
||||
"\t\t\"last_updated\": %ld,\n"
|
||||
"\t\t\"dimensions\": {"
|
||||
, chart_counter?",":""
|
||||
, st->id
|
||||
, st->name
|
||||
, st->context
|
||||
, st->units
|
||||
, rrdset_last_entry_t(st)
|
||||
);
|
||||
|
||||
chart_counter++;
|
||||
dimension_counter = 0;
|
||||
|
||||
// for each dimension
|
||||
RRDDIM *rd;
|
||||
rrddim_foreach_read(rd, st) {
|
||||
if(rd->collections_counter) {
|
||||
|
||||
buffer_sprintf(wb, "%s\n"
|
||||
"\t\t\t\"%s\": {\n"
|
||||
"\t\t\t\t\"name\": \"%s\",\n"
|
||||
"\t\t\t\t\"value\": "
|
||||
, dimension_counter?",":""
|
||||
, rd->id
|
||||
, rd->name
|
||||
);
|
||||
|
||||
if(isnan(rd->last_stored_value))
|
||||
buffer_strcat(wb, "null");
|
||||
else
|
||||
buffer_sprintf(wb, CALCULATED_NUMBER_FORMAT, rd->last_stored_value);
|
||||
|
||||
buffer_strcat(wb, "\n\t\t\t}");
|
||||
|
||||
dimension_counter++;
|
||||
}
|
||||
}
|
||||
|
||||
buffer_strcat(wb, "\n\t\t}\n\t}");
|
||||
rrdset_unlock(st);
|
||||
}
|
||||
}
|
||||
|
||||
buffer_strcat(wb, "\n}");
|
||||
rrdhost_unlock(host);
|
||||
}
|
||||
|
web/api/exporters/shell/allmetrics_shell.h (new file, 21 lines)

@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_ALLMETRICS_SHELL_H
#define NETDATA_API_ALLMETRICS_SHELL_H

#include "../allmetrics.h"

#define ALLMETRICS_FORMAT_SHELL "shell"
#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus"
#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts"
#define ALLMETRICS_FORMAT_JSON "json"

#define ALLMETRICS_SHELL 1
#define ALLMETRICS_PROMETHEUS 2
#define ALLMETRICS_JSON 3
#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4

extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb);
extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb);

#endif //NETDATA_API_ALLMETRICS_SHELL_H
netdata-swagger.json

@ -127,6 +127,8 @@
    "min",
    "max",
    "average",
    "median",
    "stddev",
    "sum",
    "incremental-sum"
],

@ -310,6 +312,8 @@
    "min",
    "max",
    "average",
    "median",
    "stddev",
    "sum",
    "incremental-sum"
],
@ -3,7 +3,7 @@ swagger: '2.0'
info:
  title: NetData API
  description: 'Real-time performance and health monitoring.'
  version: 1.9.11_rolling
  version: 1.11.0_rolling
host: registry.my-netdata.io
schemes:
  - https

@ -94,7 +94,7 @@ paths:
        description: 'The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported "min", "max", "average", "sum", "incremental-sum". "max" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).'
        required: true
        type: string
        enum: [ 'min', 'max', 'average', 'sum', 'incremental-sum' ]
        enum: [ 'min', 'max', 'average', 'median', 'stddev', 'sum', 'incremental-sum' ]
        default: 'average'
        allowEmptyValue: false
      - name: gtime

@ -204,7 +204,7 @@ paths:
        description: 'The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods are supported "min", "max", "average", "sum", "incremental-sum". "max" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).'
        required: true
        type: string
        enum: [ 'min', 'max', 'average', 'sum', 'incremental-sum' ]
        enum: [ 'min', 'max', 'average', 'median', 'stddev', 'sum', 'incremental-sum' ]
        default: 'average'
        allowEmptyValue: false
      - name: options
web/api/queries/Makefile.am (new file, 18 lines)

@ -0,0 +1,18 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

SUBDIRS = \
    average \
    incremental_sum \
    max \
    min \
    sum \
    median \
    stddev \
    $(NULL)

dist_noinst_DATA = \
    README.md \
    $(NULL)
0
web/api/queries/README.md
Normal file
0
web/api/queries/README.md
Normal file
8  web/api/queries/average/Makefile.am  Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

0  web/api/queries/average/README.md  Normal file
58  web/api/queries/average/average.c  Normal file
@@ -0,0 +1,58 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "average.h"

// ----------------------------------------------------------------------------
// average

struct grouping_average {
    calculated_number sum;
    size_t count;
};

void *grouping_init_average(RRDR *r) {
    (void)r;
    return callocz(1, sizeof(struct grouping_average));
}

// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_average(RRDR *r) {
    struct grouping_average *g = (struct grouping_average *)r->grouping_data;
    g->sum = 0;
    g->count = 0;
}

void grouping_free_average(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_average(RRDR *r, calculated_number value) {
    if(!isnan(value)) {
        struct grouping_average *g = (struct grouping_average *)r->grouping_data;
        g->sum += value;
        g->count++;
    }
}

calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_average *g = (struct grouping_average *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->count)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        if(unlikely(r->group_points != 1))
            value = g->sum / r->group_sum_divisor;
        else
            value = g->sum / g->count;
    }

    g->sum = 0.0;
    g->count = 0;

    return value;
}
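Every grouping method in this commit implements the same five hooks (init, reset, add, flush, free), and the query engine in query.c drives them per dimension. The following is a minimal sketch, not part of the commit, of that lifecycle using the "average" functions above; it assumes an RRDR `r` already prepared by rrdr_create() and that r->group_points is 1 (the default when no gtime is requested).

// Illustration only: one output point produced from two collected values.
#include "average.h"

static calculated_number example_average_of_two(RRDR *r) {
    r->grouping_data = grouping_init_average(r);    // allocate per-query state

    grouping_add_average(r, 10.0);                  // values collected for this output point
    grouping_add_average(r, 20.0);

    RRDR_VALUE_FLAGS flags = RRDR_VALUE_NOTHING;
    calculated_number v = grouping_flush_average(r, &flags);  // 15.0; flags gets RRDR_VALUE_EMPTY if nothing was added

    grouping_free_average(r);
    return v;
}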
15  web/api/queries/average/average.h  Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERY_AVERAGE_H
#define NETDATA_API_QUERY_AVERAGE_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_average(RRDR *r);
extern void grouping_reset_average(RRDR *r);
extern void grouping_free_average(RRDR *r);
extern void grouping_add_average(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERY_AVERAGE_H
8  web/api/queries/incremental_sum/Makefile.am  Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

0  web/api/queries/incremental_sum/README.md  Normal file
68  web/api/queries/incremental_sum/incremental_sum.c  Normal file
@@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "incremental_sum.h"

// ----------------------------------------------------------------------------
// incremental sum

struct grouping_incremental_sum {
    calculated_number first;
    calculated_number last;
    size_t count;
};

void *grouping_init_incremental_sum(RRDR *r) {
    (void)r;
    return callocz(1, sizeof(struct grouping_incremental_sum));
}

// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_incremental_sum(RRDR *r) {
    struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping_data;
    g->first = 0;
    g->last = 0;
    g->count = 0;
}

void grouping_free_incremental_sum(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_incremental_sum(RRDR *r, calculated_number value) {
    if(!isnan(value)) {
        struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping_data;

        if(unlikely(!g->count)) {
            g->first = value;
            g->count++;
        }
        else {
            g->last = value;
            g->count++;
        }
    }
}

calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->count)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else if(unlikely(g->count == 1)) {
        value = 0.0;
    }
    else {
        value = g->last - g->first;
    }

    g->first = 0.0;
    g->last = 0.0;
    g->count = 0;

    return value;
}
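A minimal sketch, not part of the commit, of what "incremental-sum" reports: the net change between the first and the last value grouped into one output point (a single sample flushes as 0.0). It assumes an RRDR `r` prepared by rrdr_create().

// Illustration only: net change across one grouped window.
#include "incremental_sum.h"

static calculated_number example_incremental_sum(RRDR *r) {
    r->grouping_data = grouping_init_incremental_sum(r);

    grouping_add_incremental_sum(r, 5.0);
    grouping_add_incremental_sum(r, 9.0);
    grouping_add_incremental_sum(r, 12.0);

    RRDR_VALUE_FLAGS flags = RRDR_VALUE_NOTHING;
    calculated_number v = grouping_flush_incremental_sum(r, &flags);  // 12.0 - 5.0 = 7.0

    grouping_free_incremental_sum(r);
    return v;
}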
15  web/api/queries/incremental_sum/incremental_sum.h  Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERY_INCREMENTAL_SUM_H
#define NETDATA_API_QUERY_INCREMENTAL_SUM_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_incremental_sum(RRDR *r);
extern void grouping_reset_incremental_sum(RRDR *r);
extern void grouping_free_incremental_sum(RRDR *r);
extern void grouping_add_incremental_sum(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
8  web/api/queries/max/Makefile.am  Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

0  web/api/queries/max/README.md  Normal file
59  web/api/queries/max/max.c  Normal file
@@ -0,0 +1,59 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "max.h"

// ----------------------------------------------------------------------------
// max

struct grouping_max {
    calculated_number max;
    size_t count;
};

void *grouping_init_max(RRDR *r) {
    (void)r;
    return callocz(1, sizeof(struct grouping_max));
}

// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_max(RRDR *r) {
    struct grouping_max *g = (struct grouping_max *)r->grouping_data;
    g->max = 0;
    g->count = 0;
}

void grouping_free_max(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_max(RRDR *r, calculated_number value) {
    if(!isnan(value)) {
        struct grouping_max *g = (struct grouping_max *)r->grouping_data;

        if(!g->count || calculated_number_fabs(value) > calculated_number_fabs(g->max)) {
            g->max = value;
            g->count++;
        }
    }
}

calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_max *g = (struct grouping_max *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->count)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        value = g->max;
    }

    g->max = 0.0;
    g->count = 0;

    return value;
}
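Note that grouping_add_max() compares absolute values, so for negative dimensions the most extreme value wins, which is exactly what the swagger description of the `group` parameter promises for "max". A minimal sketch, not part of the commit, assuming an RRDR `r` prepared by rrdr_create():

// Illustration only: the absolute-value comparison keeps -10.0 over 5.0.
#include "max.h"

static calculated_number example_max_with_negative(RRDR *r) {
    r->grouping_data = grouping_init_max(r);

    grouping_add_max(r, -10.0);
    grouping_add_max(r, 5.0);       // |5.0| < |-10.0|, so it is not kept

    RRDR_VALUE_FLAGS flags = RRDR_VALUE_NOTHING;
    calculated_number v = grouping_flush_max(r, &flags);  // -10.0

    grouping_free_max(r);
    return v;
}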
15  web/api/queries/max/max.h  Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERY_MAX_H
#define NETDATA_API_QUERY_MAX_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_max(RRDR *r);
extern void grouping_reset_max(RRDR *r);
extern void grouping_free_max(RRDR *r);
extern void grouping_add_max(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERY_MAX_H
8  web/api/queries/median/Makefile.am  Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

0  web/api/queries/median/README.md  Normal file
78  web/api/queries/median/median.c  Normal file
@@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "median.h"


// ----------------------------------------------------------------------------
// median

struct grouping_median {
    size_t series_size;
    size_t next_pos;

    LONG_DOUBLE series[];
};

void *grouping_init_median(RRDR *r) {
    long entries = (r->group > r->group_points) ? r->group : r->group_points;
    if(entries < 0) entries = 0;

    struct grouping_median *g = (struct grouping_median *)callocz(1, sizeof(struct grouping_median) + entries * sizeof(LONG_DOUBLE));
    g->series_size = (size_t)entries;

    return g;
}

// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_median(RRDR *r) {
    struct grouping_median *g = (struct grouping_median *)r->grouping_data;
    g->next_pos = 0;
}

void grouping_free_median(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_median(RRDR *r, calculated_number value) {
    struct grouping_median *g = (struct grouping_median *)r->grouping_data;

    if(unlikely(g->next_pos >= g->series_size)) {
        error("INTERNAL ERROR: median buffer overflow on chart '%s' - next_pos = %zu, series_size = %zu, r->group = %ld, r->group_points = %ld.", r->st->name, g->next_pos, g->series_size, r->group, r->group_points);
    }
    else {
        if(isnormal(value))
            g->series[g->next_pos++] = (LONG_DOUBLE)value;
    }
}

calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_median *g = (struct grouping_median *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->next_pos)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        if(g->next_pos > 1) {
            sort_series(g->series, g->next_pos);
            value = (calculated_number)median_on_sorted_series(g->series, g->next_pos);
        }
        else
            value = (calculated_number)g->series[0];

        if(!isnormal(value)) {
            value = 0.0;
            *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
        }

        //log_series_to_stderr(g->series, g->next_pos, value, "median");
    }

    g->next_pos = 0;

    return value;
}
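Unlike the other methods, median has to buffer every value of the group (up to max(r->group, r->group_points) entries, sized in grouping_init_median()) and then sort them with sort_series()/median_on_sorted_series() from libnetdata/statistical. A minimal sketch, not part of the commit, assuming r->group and r->group_points were already set (rrd2rrdr() does this before calling init):

// Illustration only: out-of-order values are sorted at flush time.
#include "median.h"

static calculated_number example_median_of_three(RRDR *r) {
    r->grouping_data = grouping_init_median(r);

    grouping_add_median(r, 3.0);
    grouping_add_median(r, 1.0);
    grouping_add_median(r, 2.0);

    RRDR_VALUE_FLAGS flags = RRDR_VALUE_NOTHING;
    calculated_number v = grouping_flush_median(r, &flags);  // 2.0, the middle of the sorted series

    grouping_free_median(r);
    return v;
}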
15  web/api/queries/median/median.h  Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERIES_MEDIAN_H
#define NETDATA_API_QUERIES_MEDIAN_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_median(RRDR *r);
extern void grouping_reset_median(RRDR *r);
extern void grouping_free_median(RRDR *r);
extern void grouping_add_median(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERIES_MEDIAN_H
8  web/api/queries/min/Makefile.am  Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

0  web/api/queries/min/README.md  Normal file
59  web/api/queries/min/min.c  Normal file
@@ -0,0 +1,59 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "min.h"

// ----------------------------------------------------------------------------
// min

struct grouping_min {
    calculated_number min;
    size_t count;
};

void *grouping_init_min(RRDR *r) {
    (void)r;
    return callocz(1, sizeof(struct grouping_min));
}

// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_min(RRDR *r) {
    struct grouping_min *g = (struct grouping_min *)r->grouping_data;
    g->min = 0;
    g->count = 0;
}

void grouping_free_min(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_min(RRDR *r, calculated_number value) {
    if(!isnan(value)) {
        struct grouping_min *g = (struct grouping_min *)r->grouping_data;

        if(!g->count || calculated_number_fabs(value) < calculated_number_fabs(g->min)) {
            g->min = value;
            g->count++;
        }
    }
}

calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_min *g = (struct grouping_min *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->count)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        value = g->min;
    }

    g->min = 0.0;
    g->count = 0;

    return value;
}
15  web/api/queries/min/min.h  Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERY_MIN_H
#define NETDATA_API_QUERY_MIN_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_min(RRDR *r);
extern void grouping_reset_min(RRDR *r);
extern void grouping_free_min(RRDR *r);
extern void grouping_add_min(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERY_MIN_H
729  web/api/queries/query.c  Normal file
@@ -0,0 +1,729 @@
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "query.h"
|
||||
#include "../rrd2json.h"
|
||||
#include "rrdr.h"
|
||||
|
||||
#include "average/average.h"
|
||||
#include "incremental_sum/incremental_sum.h"
|
||||
#include "max/max.h"
|
||||
#include "median/median.h"
|
||||
#include "min/min.h"
|
||||
#include "sum/sum.h"
|
||||
#include "stddev/stddev.h"
|
||||
#include "ses/ses.h"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
static struct {
|
||||
const char *name;
|
||||
uint32_t hash;
|
||||
RRDR_GROUPING value;
|
||||
void *(*init)(struct rrdresult *r);
|
||||
void (*reset)(struct rrdresult *r);
|
||||
void (*free)(struct rrdresult *r);
|
||||
void (*add)(struct rrdresult *r, calculated_number value);
|
||||
calculated_number (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
|
||||
} api_v1_data_groups[] = {
|
||||
{ "average" , 0, RRDR_GROUPING_AVERAGE , grouping_init_average , grouping_reset_average , grouping_free_average , grouping_add_average , grouping_flush_average }
|
||||
, { "incremental_sum" , 0, RRDR_GROUPING_INCREMENTAL_SUM, grouping_init_incremental_sum, grouping_reset_incremental_sum, grouping_free_incremental_sum, grouping_add_incremental_sum, grouping_flush_incremental_sum }
|
||||
, { "incremental-sum" , 0, RRDR_GROUPING_INCREMENTAL_SUM, grouping_init_incremental_sum, grouping_reset_incremental_sum, grouping_free_incremental_sum, grouping_add_incremental_sum, grouping_flush_incremental_sum }
|
||||
, { "median" , 0, RRDR_GROUPING_MEDIAN , grouping_init_median , grouping_reset_median , grouping_free_median , grouping_add_median , grouping_flush_median }
|
||||
, { "min" , 0, RRDR_GROUPING_MIN , grouping_init_min , grouping_reset_min , grouping_free_min , grouping_add_min , grouping_flush_min }
|
||||
, { "max" , 0, RRDR_GROUPING_MAX , grouping_init_max , grouping_reset_max , grouping_free_max , grouping_add_max , grouping_flush_max }
|
||||
, { "ses" , 0, RRDR_GROUPING_SES , grouping_init_ses , grouping_reset_ses , grouping_free_ses , grouping_add_ses , grouping_flush_ses }
|
||||
, { "stddev" , 0, RRDR_GROUPING_STDDEV , grouping_init_stddev , grouping_reset_stddev , grouping_free_stddev , grouping_add_stddev , grouping_flush_stddev }
|
||||
, { "sum" , 0, RRDR_GROUPING_SUM , grouping_init_sum , grouping_reset_sum , grouping_free_sum , grouping_add_sum , grouping_flush_sum }
|
||||
, { NULL , 0, RRDR_GROUPING_UNDEFINED , grouping_init_average , grouping_reset_average , grouping_free_average , grouping_add_average , grouping_flush_average }
|
||||
};
|
||||
|
||||
void web_client_api_v1_init_grouping(void) {
|
||||
int i;
|
||||
|
||||
for(i = 0; api_v1_data_groups[i].name ; i++)
|
||||
api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
|
||||
}
|
||||
|
||||
const char *group_method2string(RRDR_GROUPING group) {
|
||||
int i;
|
||||
|
||||
for(i = 0; api_v1_data_groups[i].name ; i++) {
|
||||
if(api_v1_data_groups[i].value == group) {
|
||||
return api_v1_data_groups[i].name;
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown-group-method";
|
||||
}
|
||||
|
||||
RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
|
||||
int i;
|
||||
|
||||
uint32_t hash = simple_hash(name);
|
||||
for(i = 0; api_v1_data_groups[i].name ; i++)
|
||||
if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
|
||||
return api_v1_data_groups[i].value;
|
||||
|
||||
return def;
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims) {
|
||||
rrdset_check_rdlock(r->st);
|
||||
|
||||
if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return;
|
||||
|
||||
int match_ids = 0, match_names = 0;
|
||||
|
||||
if(unlikely(options & RRDR_OPTION_MATCH_IDS))
|
||||
match_ids = 1;
|
||||
if(unlikely(options & RRDR_OPTION_MATCH_NAMES))
|
||||
match_names = 1;
|
||||
|
||||
if(likely(!match_ids && !match_names))
|
||||
match_ids = match_names = 1;
|
||||
|
||||
SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
|
||||
|
||||
RRDDIM *d;
|
||||
long c, dims_selected = 0, dims_not_hidden_not_zero = 0;
|
||||
for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
|
||||
if( (match_ids && simple_pattern_matches(pattern, d->id))
|
||||
|| (match_names && simple_pattern_matches(pattern, d->name))
|
||||
) {
|
||||
r->od[c] |= RRDR_DIMENSION_SELECTED;
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) r->od[c] &= ~RRDR_DIMENSION_HIDDEN;
|
||||
dims_selected++;
|
||||
|
||||
// since the user needs this dimension
|
||||
// make it appear as NONZERO, to return it
|
||||
// even if the dimension has only zeros
|
||||
// unless option non_zero is set
|
||||
if(unlikely(!(options & RRDR_OPTION_NONZERO)))
|
||||
r->od[c] |= RRDR_DIMENSION_NONZERO;
|
||||
|
||||
// count the visible dimensions
|
||||
if(likely(r->od[c] & RRDR_DIMENSION_NONZERO))
|
||||
dims_not_hidden_not_zero++;
|
||||
}
|
||||
else {
|
||||
r->od[c] |= RRDR_DIMENSION_HIDDEN;
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
|
||||
}
|
||||
}
|
||||
simple_pattern_free(pattern);
|
||||
|
||||
// check if all dimensions are hidden
|
||||
if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
|
||||
// there are a few selected dimensions
|
||||
// but they are all zero
|
||||
// enable the selected ones
|
||||
// to avoid returning an empty chart
|
||||
for(c = 0, d = r->st->dimensions; d ;c++, d = d->next)
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED))
|
||||
r->od[c] |= RRDR_DIMENSION_NONZERO;
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// helpers to find our way in RRDR
|
||||
|
||||
static inline RRDR_VALUE_FLAGS *rrdr_line_options(RRDR *r, long rrdr_line) {
|
||||
return &r->o[ rrdr_line * r->d ];
|
||||
}
|
||||
|
||||
static inline calculated_number *rrdr_line_values(RRDR *r, long rrdr_line) {
|
||||
return &r->v[ rrdr_line * r->d ];
|
||||
}
|
||||
|
||||
static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
|
||||
rrdr_line++;
|
||||
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
|
||||
if(unlikely(rrdr_line >= r->n))
|
||||
error("INTERNAL ERROR: requested to step above RRDR size for chart '%s'", r->st->name);
|
||||
|
||||
if(unlikely(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t))
|
||||
error("INTERNAL ERROR: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'", (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
|
||||
|
||||
#endif
|
||||
|
||||
// save the time
|
||||
r->t[rrdr_line] = t;
|
||||
|
||||
return rrdr_line;
|
||||
}
|
||||
|
||||
static inline void rrdr_done(RRDR *r, long rrdr_line) {
|
||||
r->rows = rrdr_line + 1;
|
||||
}
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// fill RRDR for a single dimension
|
||||
|
||||
static inline void do_dimension(
|
||||
RRDR *r
|
||||
, long points_wanted
|
||||
, RRDDIM *rd
|
||||
, long dim_id_in_rrdr
|
||||
, long after_slot
|
||||
, long before_slot
|
||||
, time_t after_wanted
|
||||
, time_t before_wanted
|
||||
){
|
||||
(void) before_slot;
|
||||
|
||||
RRDSET *st = r->st;
|
||||
|
||||
time_t
|
||||
now = after_wanted,
|
||||
dt = st->update_every,
|
||||
max_date = 0,
|
||||
min_date = 0;
|
||||
|
||||
long
|
||||
slot = after_slot,
|
||||
group_size = r->group,
|
||||
points_added = 0,
|
||||
values_in_group = 0,
|
||||
values_in_group_non_zero = 0,
|
||||
rrdr_line = -1,
|
||||
entries = st->entries;
|
||||
|
||||
RRDR_VALUE_FLAGS
|
||||
group_value_flags = RRDR_VALUE_NOTHING;
|
||||
|
||||
for( ; points_added < points_wanted ; now += dt, slot++ ) {
|
||||
if(unlikely(slot >= entries)) slot = 0;
|
||||
|
||||
// make sure we return data in the proper time range
|
||||
if(unlikely(now > before_wanted)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
r->log = "stopped, because attempted to access the db after 'wanted before'";
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
if(unlikely(now < after_wanted)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
r->log = "skipped, because attempted to access the db before 'wanted after'";
|
||||
#endif
|
||||
continue;
|
||||
}
|
||||
|
||||
// read the value from the database
|
||||
storage_number n = rd->values[slot];
|
||||
calculated_number value = NAN;
|
||||
if(likely(does_storage_number_exist(n))) {
|
||||
|
||||
value = unpack_storage_number(n);
|
||||
if(likely(value != 0.0))
|
||||
values_in_group_non_zero++;
|
||||
|
||||
if(unlikely(did_storage_number_reset(n)))
|
||||
group_value_flags |= RRDR_VALUE_RESET;
|
||||
|
||||
}
|
||||
|
||||
// add this value for grouping
|
||||
r->grouping_add(r, value);
|
||||
values_in_group++;
|
||||
|
||||
if(unlikely(values_in_group == group_size)) {
|
||||
rrdr_line = rrdr_line_init(r, now, rrdr_line);
|
||||
|
||||
if(unlikely(!min_date)) min_date = now;
|
||||
max_date = now;
|
||||
|
||||
// find the place to store our values
|
||||
RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
|
||||
|
||||
// update the dimension options
|
||||
if(likely(values_in_group_non_zero))
|
||||
r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
|
||||
|
||||
// store the specific point options
|
||||
*rrdr_value_options_ptr = group_value_flags;
|
||||
|
||||
// store the value
|
||||
r->v[rrdr_line * r->d + dim_id_in_rrdr] = r->grouping_flush(r, rrdr_value_options_ptr);
|
||||
|
||||
points_added++;
|
||||
values_in_group = 0;
|
||||
group_value_flags = RRDR_VALUE_NOTHING;
|
||||
values_in_group_non_zero = 0;
|
||||
}
|
||||
}
|
||||
|
||||
r->before = max_date;
|
||||
r->after = min_date;
|
||||
rrdr_done(r, rrdr_line);
|
||||
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
if(unlikely(r->rows != points_added))
|
||||
error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
|
||||
#endif
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// fill RRDR for the whole chart
|
||||
|
||||
|
||||
static void rrd2rrdr_log_request_response_metdata(RRDR *r
|
||||
, RRDR_GROUPING group_method
|
||||
, int aligned
|
||||
, long group
|
||||
, long group_time
|
||||
, long group_points
|
||||
, time_t after_wanted
|
||||
, time_t after_requested
|
||||
, time_t before_wanted
|
||||
, time_t before_requested
|
||||
, long points_requested
|
||||
, long points_wanted
|
||||
, size_t after_slot
|
||||
, size_t before_slot
|
||||
, const char *msg
|
||||
) {
|
||||
info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, gtime: %ld, gpoints: %ld), "
|
||||
"after (got: %zu, want: %zu, req: %zu, db: %zu), "
|
||||
"before (got: %zu, want: %zu, req: %zu, db: %zu), "
|
||||
"duration (got: %zu, want: %zu, req: %zu, db: %zu), "
|
||||
"slot (after: %zu, before: %zu, delta: %zu), "
|
||||
"points (got: %ld, want: %ld, req: %ld, db: %ld), "
|
||||
"%s"
|
||||
, r->st->name
|
||||
, r->st->update_every
|
||||
|
||||
// grouping
|
||||
, (aligned) ? "aligned" : "unaligned"
|
||||
, group_method2string(group_method)
|
||||
, group
|
||||
, group_time
|
||||
, group_points
|
||||
|
||||
// after
|
||||
, (size_t)r->after - (group - 1) * r->st->update_every
|
||||
, (size_t)after_wanted
|
||||
, (size_t)after_requested
|
||||
, (size_t)rrdset_first_entry_t(r->st)
|
||||
|
||||
// before
|
||||
, (size_t)r->before
|
||||
, (size_t)before_wanted
|
||||
, (size_t)before_requested
|
||||
, (size_t)rrdset_last_entry_t(r->st)
|
||||
|
||||
// duration
|
||||
, (size_t)(r->before - r->after + r->st->update_every)
|
||||
, (size_t)(before_wanted - after_wanted + r->st->update_every)
|
||||
, (size_t)(before_requested - after_requested + r->st->update_every)
|
||||
, (size_t)((rrdset_last_entry_t(r->st) - rrdset_first_entry_t(r->st)) + r->st->update_every)
|
||||
|
||||
// slot
|
||||
, after_slot
|
||||
, before_slot
|
||||
, (after_slot > before_slot) ? (r->st->entries - after_slot + before_slot) : (before_slot - after_slot)
|
||||
|
||||
// points
|
||||
, r->rows
|
||||
, points_wanted
|
||||
, points_requested
|
||||
, r->st->entries
|
||||
|
||||
// message
|
||||
, msg
|
||||
);
|
||||
}
|
||||
|
||||
RRDR *rrd2rrdr(
|
||||
RRDSET *st
|
||||
, long points_requested
|
||||
, long long after_requested
|
||||
, long long before_requested
|
||||
, RRDR_GROUPING group_method
|
||||
, long group_time_requested
|
||||
, RRDR_OPTIONS options
|
||||
, const char *dimensions
|
||||
) {
|
||||
int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
|
||||
|
||||
int absolute_period_requested = -1;
|
||||
|
||||
time_t first_entry_t = rrdset_first_entry_t(st);
|
||||
time_t last_entry_t = rrdset_last_entry_t(st);
|
||||
|
||||
if(before_requested == 0 && after_requested == 0) {
|
||||
// dump the all the data
|
||||
before_requested = last_entry_t;
|
||||
after_requested = first_entry_t;
|
||||
absolute_period_requested = 0;
|
||||
}
|
||||
|
||||
// allow relative for before (smaller than API_RELATIVE_TIME_MAX)
|
||||
if(((before_requested < 0)?-before_requested:before_requested) <= API_RELATIVE_TIME_MAX) {
|
||||
if(abs(before_requested) % st->update_every) {
|
||||
// make sure it is multiple of st->update_every
|
||||
if(before_requested < 0) before_requested = before_requested - st->update_every - before_requested % st->update_every;
|
||||
else before_requested = before_requested + st->update_every - before_requested % st->update_every;
|
||||
}
|
||||
if(before_requested > 0) before_requested = first_entry_t + before_requested;
|
||||
else before_requested = last_entry_t + before_requested;
|
||||
absolute_period_requested = 0;
|
||||
}
|
||||
|
||||
// allow relative for after (smaller than API_RELATIVE_TIME_MAX)
|
||||
if(((after_requested < 0)?-after_requested:after_requested) <= API_RELATIVE_TIME_MAX) {
|
||||
if(after_requested == 0) after_requested = -st->update_every;
|
||||
if(abs(after_requested) % st->update_every) {
|
||||
// make sure it is multiple of st->update_every
|
||||
if(after_requested < 0) after_requested = after_requested - st->update_every - after_requested % st->update_every;
|
||||
else after_requested = after_requested + st->update_every - after_requested % st->update_every;
|
||||
}
|
||||
after_requested = before_requested + after_requested;
|
||||
absolute_period_requested = 0;
|
||||
}
|
||||
|
||||
if(absolute_period_requested == -1)
|
||||
absolute_period_requested = 1;
|
||||
|
||||
// make sure they are within our timeframe
|
||||
if(before_requested > last_entry_t) before_requested = last_entry_t;
|
||||
if(before_requested < first_entry_t) before_requested = first_entry_t;
|
||||
|
||||
if(after_requested > last_entry_t) after_requested = last_entry_t;
|
||||
if(after_requested < first_entry_t) after_requested = first_entry_t;
|
||||
|
||||
// check if they are reversed
|
||||
if(after_requested > before_requested) {
|
||||
time_t tmp = before_requested;
|
||||
before_requested = after_requested;
|
||||
after_requested = tmp;
|
||||
}
|
||||
|
||||
// the duration of the chart
|
||||
time_t duration = before_requested - after_requested;
|
||||
long available_points = duration / st->update_every;
|
||||
|
||||
if(duration <= 0 || available_points <= 0)
|
||||
return rrdr_create(st, 1);
|
||||
|
||||
// check the number of wanted points in the result
|
||||
if(unlikely(points_requested < 0)) points_requested = -points_requested;
|
||||
if(unlikely(points_requested > available_points)) points_requested = available_points;
|
||||
if(unlikely(points_requested == 0)) points_requested = available_points;
|
||||
|
||||
// calculate the desired grouping of source data points
|
||||
long group = available_points / points_requested;
|
||||
if(unlikely(group <= 0)) group = 1;
|
||||
if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
|
||||
|
||||
// group_time enforces a certain grouping multiple
|
||||
calculated_number group_sum_divisor = 1.0;
|
||||
long group_points = 1;
|
||||
if(unlikely(group_time_requested > st->update_every)) {
|
||||
if (unlikely(group_time_requested > duration)) {
|
||||
// group_time is above the available duration
|
||||
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, group_time_requested, duration);
|
||||
#endif
|
||||
|
||||
group = points_requested; // use all the points
|
||||
}
|
||||
else {
|
||||
// the points we should group to satisfy gtime
|
||||
group_points = group_time_requested / st->update_every;
|
||||
if(unlikely(group_time_requested % group_points)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, group_time_requested, st->update_every);
|
||||
#endif
|
||||
|
||||
group_points++;
|
||||
}
|
||||
|
||||
// adapt group according to group_points
|
||||
if(unlikely(group < group_points)) group = group_points; // do not allow grouping below the desired one
|
||||
if(unlikely(group % group_points)) group += group_points - (group % group_points); // make sure group is multiple of group_points
|
||||
|
||||
//group_sum_divisor = group / group_points;
|
||||
group_sum_divisor = (calculated_number)(group * st->update_every) / (calculated_number)group_time_requested;
|
||||
}
|
||||
}
|
||||
|
||||
size_t before_slot, after_slot;
|
||||
|
||||
time_t before_wanted = before_requested - (before_requested % ( ((aligned)?group:1) * st->update_every )) + ( ((aligned)?group:1) * st->update_every );
|
||||
if(unlikely(before_wanted > last_entry_t)) {
|
||||
before_wanted = last_entry_t - (last_entry_t % ((aligned) ? group : 1) * st->update_every);
|
||||
|
||||
if(unlikely(before_wanted > last_entry_t)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
|
||||
#endif
|
||||
|
||||
while(before_wanted > last_entry_t)
|
||||
before_wanted -= (((aligned) ? group : 1) * st->update_every);
|
||||
}
|
||||
}
|
||||
before_slot = rrdset_time2slot(st, before_wanted);
|
||||
|
||||
// we need to estimate the number of points, for having
|
||||
// an integer number of values per point
|
||||
long points_wanted = (before_wanted - after_requested) / st->update_every / group;
|
||||
|
||||
time_t after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every;
|
||||
if(unlikely(after_wanted < first_entry_t)) {
|
||||
// hm... we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning
|
||||
points_wanted = (before_wanted - first_entry_t) / group;
|
||||
|
||||
// recalculate after wanted with the new number of points
|
||||
after_wanted = before_wanted - (points_wanted * group * st->update_every) + st->update_every;
|
||||
|
||||
if(unlikely(after_wanted < first_entry_t)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
|
||||
#endif
|
||||
|
||||
while(after_wanted < first_entry_t)
|
||||
after_wanted += (((aligned) ? group : 1) * st->update_every);
|
||||
}
|
||||
}
|
||||
after_slot = rrdset_time2slot(st, after_wanted);
|
||||
|
||||
// check if they are reversed
|
||||
if(unlikely(after_wanted > before_wanted)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
|
||||
#endif
|
||||
time_t tmp = before_wanted;
|
||||
before_wanted = after_wanted;
|
||||
after_wanted = tmp;
|
||||
}
|
||||
|
||||
// recalculate points_wanted using the final time-frame
|
||||
points_wanted = (before_wanted - after_wanted) / st->update_every / group + 1;
|
||||
if(unlikely(points_wanted < 0)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
|
||||
#endif
|
||||
points_wanted = 0;
|
||||
}
|
||||
|
||||
duration = before_wanted - after_wanted;
|
||||
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
if(after_wanted < first_entry_t)
|
||||
error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
|
||||
|
||||
if(after_wanted > last_entry_t)
|
||||
error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
|
||||
|
||||
if(before_wanted < first_entry_t)
|
||||
error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
|
||||
|
||||
if(before_wanted > last_entry_t)
|
||||
error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
|
||||
|
||||
if(before_slot >= (size_t)st->entries)
|
||||
error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
|
||||
|
||||
if(after_slot >= (size_t)st->entries)
|
||||
error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
|
||||
|
||||
if(points_wanted > (before_wanted - after_wanted) / group / st->update_every + 1)
|
||||
error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / st->update_every + 1);
|
||||
|
||||
if(group < group_points)
|
||||
error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, group_points);
|
||||
|
||||
if(group > group_points && group % group_points)
|
||||
error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, group_points);
|
||||
#endif
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// initialize our result set
|
||||
// this also locks the chart for us
|
||||
|
||||
RRDR *r = rrdr_create(st, points_wanted);
|
||||
if(unlikely(!r)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
|
||||
#endif
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if(unlikely(!r->d || !points_wanted)) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
|
||||
#endif
|
||||
return r;
|
||||
}
|
||||
|
||||
if(unlikely(absolute_period_requested == 1))
|
||||
r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
|
||||
else
|
||||
r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
|
||||
|
||||
// find how many dimensions we have
|
||||
long dimensions_count = r->d;
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// initialize RRDR
|
||||
|
||||
r->group = group;
|
||||
r->update_every = (int)group * st->update_every;
|
||||
r->before = before_wanted;
|
||||
r->after = after_wanted;
|
||||
r->group_points = group_points;
|
||||
r->group_sum_divisor = group_sum_divisor;
|
||||
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// assign the processor functions
|
||||
|
||||
{
|
||||
int i, found = 0;
|
||||
for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
|
||||
if(api_v1_data_groups[i].value == group_method) {
|
||||
r->grouping_init = api_v1_data_groups[i].init;
|
||||
r->grouping_reset = api_v1_data_groups[i].reset;
|
||||
r->grouping_free = api_v1_data_groups[i].free;
|
||||
r->grouping_add = api_v1_data_groups[i].add;
|
||||
r->grouping_flush = api_v1_data_groups[i].flush;
|
||||
found = 1;
|
||||
}
|
||||
}
|
||||
if(!found) {
|
||||
errno = 0;
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
|
||||
#endif
|
||||
r->grouping_init = grouping_init_average;
|
||||
r->grouping_reset = grouping_reset_average;
|
||||
r->grouping_free = grouping_free_average;
|
||||
r->grouping_add = grouping_add_average;
|
||||
r->grouping_flush = grouping_flush_average;
|
||||
}
|
||||
}
|
||||
|
||||
// allocate any memory required by the grouping method
|
||||
r->grouping_data = r->grouping_init(r);
|
||||
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// disable the not-wanted dimensions
|
||||
|
||||
rrdset_check_rdlock(st);
|
||||
|
||||
if(dimensions)
|
||||
rrdr_disable_not_selected_dimensions(r, options, dimensions);
|
||||
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// do the work for each dimension
|
||||
|
||||
time_t max_after = 0, min_before = 0;
|
||||
long max_rows = 0;
|
||||
|
||||
RRDDIM *rd;
|
||||
long c, dimensions_used = 0, dimensions_nonzero = 0;
|
||||
for(rd = st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
|
||||
|
||||
// if we need a percentage, we need to calculate all dimensions
|
||||
if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN)))
|
||||
continue;
|
||||
|
||||
// reset the grouping for the new dimension
|
||||
r->grouping_reset(r);
|
||||
|
||||
do_dimension(
|
||||
r
|
||||
, points_wanted
|
||||
, rd
|
||||
, c
|
||||
, after_slot
|
||||
, before_slot
|
||||
, after_wanted
|
||||
, before_wanted
|
||||
);
|
||||
|
||||
if(r->od[c] & RRDR_DIMENSION_NONZERO)
|
||||
dimensions_nonzero++;
|
||||
|
||||
// verify all dimensions are aligned
|
||||
if(unlikely(!dimensions_used)) {
|
||||
min_before = r->before;
|
||||
max_after = r->after;
|
||||
max_rows = r->rows;
|
||||
}
|
||||
else {
|
||||
if(r->after != max_after) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
|
||||
st->name, (size_t)max_after, rd->name, (size_t)r->after);
|
||||
#endif
|
||||
r->after = (r->after > max_after) ? r->after : max_after;
|
||||
}
|
||||
|
||||
if(r->before != min_before) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
|
||||
st->name, (size_t)min_before, rd->name, (size_t)r->before);
|
||||
#endif
|
||||
r->before = (r->before < min_before) ? r->before : min_before;
|
||||
}
|
||||
|
||||
if(r->rows != max_rows) {
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
|
||||
st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
|
||||
#endif
|
||||
r->rows = (r->rows > max_rows) ? r->rows : max_rows;
|
||||
}
|
||||
}
|
||||
|
||||
dimensions_used++;
|
||||
}
|
||||
|
||||
#ifdef NETDATA_INTERNAL_CHECKS
|
||||
|
||||
if(r->log)
|
||||
rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, group_time_requested, group_points, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, r->log);
|
||||
|
||||
if(r->rows != points_wanted)
|
||||
rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, group_time_requested, group_points, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'points' is not wanted 'points'");
|
||||
|
||||
if(aligned && (r->before % group) != 0)
|
||||
rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, group_time_requested, group_points, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'before' is not aligned but alignment is required");
|
||||
|
||||
// 'after' should not be aligned, since we start inside the first group
|
||||
//if(aligned && (r->after % group) != 0)
|
||||
// rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, group_time_requested, group_points, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
|
||||
|
||||
if(r->before != before_wanted)
|
||||
rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, group_time_requested, group_points, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'before' is not wanted 'before'");
|
||||
|
||||
// reported 'after' varies, depending on group
|
||||
if((r->after - (group - 1) * r->st->update_every) != after_wanted)
|
||||
rrd2rrdr_log_request_response_metdata(r, group_method, aligned, group, group_time_requested, group_points, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "got 'after' is not wanted 'after'");
|
||||
|
||||
#endif
|
||||
|
||||
// free all resources used by the grouping method
|
||||
r->grouping_free(r);
|
||||
|
||||
// when all the dimensions are zero, we should return all of them
|
||||
if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero)) {
|
||||
// all the dimensions are zero
|
||||
// mark them as NONZERO to send them all
|
||||
for(rd = st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
r->od[c] |= RRDR_DIMENSION_NONZERO;
|
||||
}
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
22  web/api/queries/query.h  Normal file
@@ -0,0 +1,22 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_DATA_QUERY_H
#define NETDATA_API_DATA_QUERY_H

typedef enum rrdr_grouping {
    RRDR_GROUPING_UNDEFINED = 0,
    RRDR_GROUPING_AVERAGE = 1,
    RRDR_GROUPING_MIN = 2,
    RRDR_GROUPING_MAX = 3,
    RRDR_GROUPING_SUM = 4,
    RRDR_GROUPING_INCREMENTAL_SUM = 5,
    RRDR_GROUPING_MEDIAN = 6,
    RRDR_GROUPING_STDDEV = 7,
    RRDR_GROUPING_SES = 8,
} RRDR_GROUPING;

extern const char *group_method2string(RRDR_GROUPING group);
extern void web_client_api_v1_init_grouping(void);
extern RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);

#endif //NETDATA_API_DATA_QUERY_H
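query.h is the public face of the new query engine: the RRDR_GROUPING enum plus the lookup that turns the `group=` request parameter into one of its values. A minimal sketch, not part of the commit, of how a caller resolves a name; it assumes web_client_api_v1_init_grouping() has already run once at startup so the name hashes in query.c are populated.

// Illustration only: resolve a "group=" parameter, falling back to average.
#include "query.h"

static RRDR_GROUPING example_parse_group(const char *name) {
    // unknown names fall back to the default given as the second argument
    return web_client_api_request_v1_data_group(name, RRDR_GROUPING_AVERAGE);
}

// example_parse_group("median")     -> RRDR_GROUPING_MEDIAN
// example_parse_group("stddev")     -> RRDR_GROUPING_STDDEV
// example_parse_group("not-a-name") -> RRDR_GROUPING_AVERAGE (the default)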
624  web/api/queries/rrdr.c  Normal file
@@ -0,0 +1,624 @@
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
#include "rrdr.h"
|
||||
|
||||
/*
|
||||
static void rrdr_dump(RRDR *r)
|
||||
{
|
||||
long c, i;
|
||||
RRDDIM *d;
|
||||
|
||||
fprintf(stderr, "\nCHART %s (%s)\n", r->st->id, r->st->name);
|
||||
|
||||
for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
|
||||
fprintf(stderr, "DIMENSION %s (%s), %s%s%s%s\n"
|
||||
, d->id
|
||||
, d->name
|
||||
, (r->od[c] & RRDR_EMPTY)?"EMPTY ":""
|
||||
, (r->od[c] & RRDR_RESET)?"RESET ":""
|
||||
, (r->od[c] & RRDR_DIMENSION_HIDDEN)?"HIDDEN ":""
|
||||
, (r->od[c] & RRDR_DIMENSION_NONZERO)?"NONZERO ":""
|
||||
);
|
||||
}
|
||||
|
||||
if(r->rows <= 0) {
|
||||
fprintf(stderr, "RRDR does not have any values in it.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
fprintf(stderr, "RRDR includes %d values in it:\n", r->rows);
|
||||
|
||||
// for each line in the array
|
||||
for(i = 0; i < r->rows ;i++) {
|
||||
calculated_number *cn = &r->v[ i * r->d ];
|
||||
RRDR_DIMENSION_FLAGS *co = &r->o[ i * r->d ];
|
||||
|
||||
// print the id and the timestamp of the line
|
||||
fprintf(stderr, "%ld %ld ", i + 1, r->t[i]);
|
||||
|
||||
// for each dimension
|
||||
for(c = 0, d = r->st->dimensions; d ;c++, d = d->next) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
if(unlikely(!(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
|
||||
|
||||
if(co[c] & RRDR_EMPTY)
|
||||
fprintf(stderr, "null ");
|
||||
else
|
||||
fprintf(stderr, CALCULATED_NUMBER_FORMAT " %s%s%s%s "
|
||||
, cn[c]
|
||||
, (co[c] & RRDR_EMPTY)?"E":" "
|
||||
, (co[c] & RRDR_RESET)?"R":" "
|
||||
, (co[c] & RRDR_DIMENSION_HIDDEN)?"H":" "
|
||||
, (co[c] & RRDR_DIMENSION_NONZERO)?"N":" "
|
||||
);
|
||||
}
|
||||
|
||||
fprintf(stderr, "\n");
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
#define JSON_DATES_JS 1
|
||||
#define JSON_DATES_TIMESTAMP 2
|
||||
|
||||
void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
|
||||
rrdset_check_rdlock(r->st);
|
||||
|
||||
//info("RRD2JSON(): %s: BEGIN", r->st->id);
|
||||
int row_annotations = 0, dates, dates_with_new = 0;
|
||||
char kq[2] = "", // key quote
|
||||
sq[2] = "", // string quote
|
||||
pre_label[101] = "", // before each label
|
||||
post_label[101] = "", // after each label
|
||||
pre_date[101] = "", // the beginning of line, to the date
|
||||
post_date[101] = "", // closing the date
|
||||
pre_value[101] = "", // before each value
|
||||
post_value[101] = "", // after each value
|
||||
post_line[101] = "", // at the end of each row
|
||||
normal_annotation[201] = "", // default row annotation
|
||||
overflow_annotation[201] = "", // overflow row annotation
|
||||
data_begin[101] = "", // between labels and values
|
||||
finish[101] = ""; // at the end of everything
|
||||
|
||||
if(datatable) {
|
||||
dates = JSON_DATES_JS;
|
||||
if( options & RRDR_OPTION_GOOGLE_JSON ) {
|
||||
kq[0] = '\0';
|
||||
sq[0] = '\'';
|
||||
}
|
||||
else {
|
||||
kq[0] = '"';
|
||||
sq[0] = '"';
|
||||
}
|
||||
row_annotations = 1;
|
||||
snprintfz(pre_date, 100, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq);
|
||||
snprintfz(post_date, 100, "%s}", sq);
|
||||
snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq);
|
||||
snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq);
|
||||
snprintfz(pre_value, 100, ",{%sv%s:", kq, kq);
|
||||
strcpy(post_value, "}");
|
||||
strcpy(post_line, "]}");
|
||||
snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq);
|
||||
strcpy(finish, "\n ]\n}");
|
||||
|
||||
snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq);
|
||||
snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq);
|
||||
|
||||
buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq);
|
||||
buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq);
|
||||
buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
|
||||
buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
|
||||
|
||||
// remove the valueobjects flag
|
||||
// google wants its own keys
|
||||
if(options & RRDR_OPTION_OBJECTSROWS)
|
||||
options &= ~RRDR_OPTION_OBJECTSROWS;
|
||||
}
|
||||
else {
|
||||
kq[0] = '"';
|
||||
sq[0] = '"';
|
||||
if(options & RRDR_OPTION_GOOGLE_JSON) {
|
||||
dates = JSON_DATES_JS;
|
||||
dates_with_new = 1;
|
||||
}
|
||||
else {
|
||||
dates = JSON_DATES_TIMESTAMP;
|
||||
dates_with_new = 0;
|
||||
}
|
||||
if( options & RRDR_OPTION_OBJECTSROWS )
|
||||
strcpy(pre_date, " { ");
|
||||
else
|
||||
strcpy(pre_date, " [ ");
|
||||
strcpy(pre_label, ", \"");
|
||||
strcpy(post_label, "\"");
|
||||
strcpy(pre_value, ", ");
|
||||
if( options & RRDR_OPTION_OBJECTSROWS )
|
||||
strcpy(post_line, "}");
|
||||
else
|
||||
strcpy(post_line, "]");
|
||||
snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq);
|
||||
strcpy(finish, "\n ]\n}");
|
||||
|
||||
buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq);
|
||||
buffer_sprintf(wb, "%stime%s", sq, sq);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// print the JSON header
|
||||
|
||||
long c, i;
|
||||
RRDDIM *rd;
|
||||
|
||||
// print the header lines
|
||||
for(c = 0, i = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
|
||||
|
||||
buffer_strcat(wb, pre_label);
|
||||
buffer_strcat(wb, rd->name);
|
||||
buffer_strcat(wb, post_label);
|
||||
i++;
|
||||
}
|
||||
if(!i) {
|
||||
buffer_strcat(wb, pre_label);
|
||||
buffer_strcat(wb, "no data");
|
||||
buffer_strcat(wb, post_label);
|
||||
}
|
||||
|
||||
// print the begin of row data
|
||||
buffer_strcat(wb, data_begin);
|
||||
|
||||
// if all dimensions are hidden, print a null
|
||||
if(!i) {
|
||||
buffer_strcat(wb, finish);
|
||||
return;
|
||||
}
|
||||
|
||||
long start = 0, end = rrdr_rows(r), step = 1;
|
||||
if(!(options & RRDR_OPTION_REVERSED)) {
|
||||
start = rrdr_rows(r) - 1;
|
||||
end = -1;
|
||||
step = -1;
|
||||
}
|
||||
|
||||
// for each line in the array
|
||||
calculated_number total = 1;
|
||||
for(i = start; i != end ;i += step) {
|
||||
calculated_number *cn = &r->v[ i * r->d ];
|
||||
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
|
||||
|
||||
time_t now = r->t[i];
|
||||
|
||||
if(dates == JSON_DATES_JS) {
|
||||
// generate the local date time
|
||||
struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
|
||||
if(!tm) { error("localtime_r() failed."); continue; }
|
||||
|
||||
if(likely(i != start)) buffer_strcat(wb, ",\n");
|
||||
buffer_strcat(wb, pre_date);
|
||||
|
||||
if( options & RRDR_OPTION_OBJECTSROWS )
|
||||
buffer_sprintf(wb, "%stime%s: ", kq, kq);
|
||||
|
||||
if(dates_with_new)
|
||||
buffer_strcat(wb, "new ");
|
||||
|
||||
buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
|
||||
|
||||
buffer_strcat(wb, post_date);
|
||||
|
||||
if(row_annotations) {
|
||||
// google supports one annotation per row
|
||||
int annotation_found = 0;
|
||||
for(c = 0, rd = r->st->dimensions; rd ;c++, rd = rd->next) {
|
||||
if(co[c] & RRDR_VALUE_RESET) {
|
||||
buffer_strcat(wb, overflow_annotation);
|
||||
annotation_found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(!annotation_found)
|
||||
buffer_strcat(wb, normal_annotation);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// print the timestamp of the line
|
||||
if(likely(i != start)) buffer_strcat(wb, ",\n");
|
||||
buffer_strcat(wb, pre_date);
|
||||
|
||||
if( options & RRDR_OPTION_OBJECTSROWS )
|
||||
buffer_sprintf(wb, "%stime%s: ", kq, kq);
|
||||
|
||||
buffer_rrd_value(wb, (calculated_number)r->t[i]);
|
||||
// in ms
|
||||
if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
|
||||
|
||||
buffer_strcat(wb, post_date);
|
||||
}
|
||||
|
||||
int set_min_max = 0;
|
||||
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
|
||||
total = 0;
|
||||
for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
|
||||
calculated_number n = cn[c];
|
||||
|
||||
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
|
||||
n = -n;
|
||||
|
||||
total += n;
|
||||
}
|
||||
// prevent a division by zero
|
||||
if(total == 0) total = 1;
|
||||
set_min_max = 1;
|
||||
}
|
||||
|
||||
// for each dimension
|
||||
for(c = 0, rd = r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
|
||||
|
||||
calculated_number n = cn[c];
|
||||
|
||||
buffer_strcat(wb, pre_value);
|
||||
|
||||
if( options & RRDR_OPTION_OBJECTSROWS )
|
||||
buffer_sprintf(wb, "%s%s%s: ", kq, rd->name, kq);
|
||||
|
||||
if(co[c] & RRDR_VALUE_EMPTY) {
|
||||
if(options & RRDR_OPTION_NULL2ZERO)
|
||||
buffer_strcat(wb, "0");
|
||||
else
|
||||
buffer_strcat(wb, "null");
|
||||
}
|
||||
else {
|
||||
if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
|
||||
n = -n;
|
||||
|
||||
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
|
||||
n = n * 100 / total;
|
||||
|
||||
if(unlikely(set_min_max)) {
|
||||
r->min = r->max = n;
|
||||
set_min_max = 0;
|
||||
}
|
||||
|
||||
if(n < r->min) r->min = n;
|
||||
if(n > r->max) r->max = n;
|
||||
}
|
||||
|
||||
buffer_rrd_value(wb, n);
|
||||
}
|
||||
|
||||
buffer_strcat(wb, post_value);
|
||||
}
|
||||
|
||||
buffer_strcat(wb, post_line);
|
||||
}
|
||||
|
||||
buffer_strcat(wb, finish);
|
||||
//info("RRD2JSON(): %s: END", r->st->id);
|
||||
}
|
||||
|
||||
void rrdr2csv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines) {
|
||||
rrdset_check_rdlock(r->st);
|
||||
|
||||
//info("RRD2CSV(): %s: BEGIN", r->st->id);
|
||||
long c, i;
|
||||
RRDDIM *d;
|
||||
|
||||
// print the csv header
|
||||
for(c = 0, i = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
|
||||
|
||||
if(!i) {
|
||||
buffer_strcat(wb, startline);
|
||||
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
|
||||
buffer_strcat(wb, "time");
|
||||
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
|
||||
}
|
||||
buffer_strcat(wb, separator);
|
||||
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
|
||||
buffer_strcat(wb, d->name);
|
||||
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
|
||||
i++;
|
||||
}
|
||||
buffer_strcat(wb, endline);
|
||||
|
||||
if(!i) {
|
||||
// no dimensions present
|
||||
return;
|
||||
}
|
||||
|
||||
long start = 0, end = rrdr_rows(r), step = 1;
|
||||
if(!(options & RRDR_OPTION_REVERSED)) {
|
||||
start = rrdr_rows(r) - 1;
|
||||
end = -1;
|
||||
step = -1;
|
||||
}
|
||||
|
||||
// for each line in the array
|
||||
calculated_number total = 1;
|
||||
for(i = start; i != end ;i += step) {
|
||||
calculated_number *cn = &r->v[ i * r->d ];
|
||||
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
|
||||
|
||||
buffer_strcat(wb, betweenlines);
|
||||
buffer_strcat(wb, startline);
|
||||
|
||||
time_t now = r->t[i];
|
||||
|
||||
if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) {
|
||||
// print the timestamp of the line
|
||||
buffer_rrd_value(wb, (calculated_number)now);
|
||||
// in ms
|
||||
if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
|
||||
}
|
||||
else {
|
||||
// generate the local date time
|
||||
struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
|
||||
if(!tm) { error("localtime() failed."); continue; }
|
||||
buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
|
||||
}
|
||||
|
||||
int set_min_max = 0;
|
||||
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
|
||||
total = 0;
|
||||
for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
|
||||
calculated_number n = cn[c];
|
||||
|
||||
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
|
||||
n = -n;
|
||||
|
||||
total += n;
|
||||
}
|
||||
// prevent a division by zero
|
||||
if(total == 0) total = 1;
|
||||
set_min_max = 1;
|
||||
}
|
||||
|
||||
// for each dimension
|
||||
for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
|
||||
|
||||
buffer_strcat(wb, separator);
|
||||
|
||||
calculated_number n = cn[c];
|
||||
|
||||
if(co[c] & RRDR_VALUE_EMPTY) {
|
||||
if(options & RRDR_OPTION_NULL2ZERO)
|
||||
buffer_strcat(wb, "0");
|
||||
else
|
||||
buffer_strcat(wb, "null");
|
||||
}
|
||||
else {
|
||||
if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
|
||||
n = -n;
|
||||
|
||||
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
|
||||
n = n * 100 / total;
|
||||
|
||||
if(unlikely(set_min_max)) {
|
||||
r->min = r->max = n;
|
||||
set_min_max = 0;
|
||||
}
|
||||
|
||||
if(n < r->min) r->min = n;
|
||||
if(n > r->max) r->max = n;
|
||||
}
|
||||
|
||||
buffer_rrd_value(wb, n);
|
||||
}
|
||||
}
|
||||
|
||||
buffer_strcat(wb, endline);
|
||||
}
|
||||
//info("RRD2CSV(): %s: END", r->st->id);
|
||||
}
|
||||
|
||||
inline calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null) {
|
||||
rrdset_check_rdlock(r->st);
|
||||
|
||||
long c;
|
||||
RRDDIM *d;
|
||||
|
||||
calculated_number *cn = &r->v[ i * r->d ];
|
||||
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
|
||||
|
||||
calculated_number sum = 0, min = 0, max = 0, v;
|
||||
int all_null = 1, init = 1;
|
||||
|
||||
calculated_number total = 1;
|
||||
int set_min_max = 0;
|
||||
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
|
||||
total = 0;
|
||||
for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
|
||||
calculated_number n = cn[c];
|
||||
|
||||
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
|
||||
n = -n;
|
||||
|
||||
total += n;
|
||||
}
|
||||
// prevent a division by zero
|
||||
if(total == 0) total = 1;
|
||||
set_min_max = 1;
|
||||
}
|
||||
|
||||
// for each dimension
|
||||
for(c = 0, d = r->st->dimensions; d && c < r->d ;c++, d = d->next) {
|
||||
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
|
||||
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
|
||||
|
||||
calculated_number n = cn[c];
|
||||
|
||||
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
|
||||
n = -n;
|
||||
|
||||
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
|
||||
n = n * 100 / total;
|
||||
|
||||
if(unlikely(set_min_max)) {
|
||||
r->min = r->max = n;
|
||||
set_min_max = 0;
|
||||
}
|
||||
|
||||
if(n < r->min) r->min = n;
|
||||
if(n > r->max) r->max = n;
|
||||
}
|
||||
|
||||
if(unlikely(init)) {
|
||||
if(n > 0) {
|
||||
min = 0;
|
||||
max = n;
|
||||
}
|
||||
else {
|
||||
min = n;
|
||||
max = 0;
|
||||
}
|
||||
init = 0;
|
||||
}
|
||||
|
||||
if(likely(!(co[c] & RRDR_VALUE_EMPTY))) {
|
||||
all_null = 0;
|
||||
sum += n;
|
||||
}
|
||||
|
||||
if(n < min) min = n;
|
||||
if(n > max) max = n;
|
||||
}
|
||||
|
||||
if(unlikely(all_null)) {
|
||||
if(likely(all_values_are_null))
|
||||
*all_values_are_null = 1;
|
||||
return 0;
|
||||
}
|
||||
else {
|
||||
if(likely(all_values_are_null))
|
||||
*all_values_are_null = 0;
|
||||
}
|
||||
|
||||
if(options & RRDR_OPTION_MIN2MAX)
|
||||
v = max - min;
|
||||
else
|
||||
v = sum;
|
||||
|
||||
return v;
|
||||
}
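A concrete reading of the two aggregation modes above: if a row has two visible dimensions valued 5 and -2, rrdr2value() returns their sum, 3, by default; with RRDR_OPTION_MIN2MAX it returns max - min instead, here 5 - (-2) = 7. Note that the min/max tracking is seeded so that zero is always included in the range, as the init branch above shows.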
|
||||
|
||||
void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix) {
|
||||
//info("RRD2SSV(): %s: BEGIN", r->st->id);
|
||||
long i;
|
||||
|
||||
buffer_strcat(wb, prefix);
|
||||
long start = 0, end = rrdr_rows(r), step = 1;
|
||||
if(!(options & RRDR_OPTION_REVERSED)) {
|
||||
start = rrdr_rows(r) - 1;
|
||||
end = -1;
|
||||
step = -1;
|
||||
}
|
||||
|
||||
// for each line in the array
|
||||
for(i = start; i != end ;i += step) {
|
||||
int all_values_are_null = 0;
|
||||
calculated_number v = rrdr2value(r, i, options, &all_values_are_null);
|
||||
|
||||
if(likely(i != start)) {
|
||||
if(r->min > v) r->min = v;
|
||||
if(r->max < v) r->max = v;
|
||||
}
|
||||
else {
|
||||
r->min = v;
|
||||
r->max = v;
|
||||
}
|
||||
|
||||
if(likely(i != start))
|
||||
buffer_strcat(wb, separator);
|
||||
|
||||
if(all_values_are_null) {
|
||||
if(options & RRDR_OPTION_NULL2ZERO)
|
||||
buffer_strcat(wb, "0");
|
||||
else
|
||||
buffer_strcat(wb, "null");
|
||||
}
|
||||
else
|
||||
buffer_rrd_value(wb, v);
|
||||
}
|
||||
buffer_strcat(wb, suffix);
|
||||
//info("RRD2SSV(): %s: END", r->st->id);
|
||||
}
|
||||
|
||||
inline static void rrdr_lock_rrdset(RRDR *r) {
    if(unlikely(!r)) {
        error("NULL value given!");
        return;
    }

    rrdset_rdlock(r->st);
    r->has_st_lock = 1;
}

inline static void rrdr_unlock_rrdset(RRDR *r) {
    if(unlikely(!r)) {
        error("NULL value given!");
        return;
    }

    if(likely(r->has_st_lock)) {
        rrdset_unlock(r->st);
        r->has_st_lock = 0;
    }
}

inline void rrdr_free(RRDR *r)
{
    if(unlikely(!r)) {
        error("NULL value given!");
        return;
    }

    rrdr_unlock_rrdset(r);
    freez(r->t);
    freez(r->v);
    freez(r->o);
    freez(r->od);
    freez(r);
}

RRDR *rrdr_create(RRDSET *st, long n)
{
    if(unlikely(!st)) {
        error("NULL value given!");
        return NULL;
    }

    RRDR *r = callocz(1, sizeof(RRDR));
    r->st = st;

    rrdr_lock_rrdset(r);

    RRDDIM *rd;
    rrddim_foreach_read(rd, st) r->d++;

    r->n = n;

    r->t = callocz((size_t)n, sizeof(time_t));
    r->v = mallocz(n * r->d * sizeof(calculated_number));
    r->o = mallocz(n * r->d * sizeof(RRDR_VALUE_FLAGS));
    r->od = mallocz(r->d * sizeof(RRDR_DIMENSION_FLAGS));

    // set the hidden flag on hidden dimensions
    int c;
    for(c = 0, rd = st->dimensions ; rd ; c++, rd = rd->next) {
        if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)))
            r->od[c] = RRDR_DIMENSION_HIDDEN;
        else
            r->od[c] = 0;
    }

    r->group = 1;
    r->update_every = 1;

    return r;
}
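For orientation, this is how a caller is expected to drive the result object end to end. It is a minimal sketch only: the window, options and error handling are illustrative, it relies on the declarations in the new rrdr.h shown next, and it assumes the usual convention that a negative after value is relative to "now".

    // illustration only: query the last 10 minutes of a chart into 60 points,
    // average-grouped, and render the result as plain JSON into wb
    static int example_chart_to_json(RRDSET *st, BUFFER *wb) {
        RRDR *r = rrd2rrdr(st, 60, -600, 0, RRDR_GROUPING_AVERAGE, 0, RRDR_OPTION_NONZERO, NULL);
        if(unlikely(!r)) return 500;                  // the query could not be executed

        rrdr2json(r, wb, RRDR_OPTION_NONZERO, 0);     // datatable = 0: plain JSON, not a google DataTable
        rrdr_free(r);                                 // frees the arrays and releases the rrdset read lock
        return 200;
    }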
web/api/queries/rrdr.h (new file, 105 lines)

@@ -0,0 +1,105 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_QUERIES_RRDR_H
#define NETDATA_QUERIES_RRDR_H

#include "libnetdata/libnetdata.h"
#include "../web_api_v1.h"

typedef enum rrdr_options {
    RRDR_OPTION_NONZERO      = 0x00000001, // don't output dimensions with just zero values
    RRDR_OPTION_REVERSED     = 0x00000002, // output the rows in reverse order (oldest to newest)
    RRDR_OPTION_ABSOLUTE     = 0x00000004, // values positive, for DATASOURCE_SSV before summing
    RRDR_OPTION_MIN2MAX      = 0x00000008, // when adding dimensions, use max - min, instead of sum
    RRDR_OPTION_SECONDS      = 0x00000010, // output seconds, instead of dates
    RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
    RRDR_OPTION_NULL2ZERO    = 0x00000040, // do not show nulls, convert them to zeros
    RRDR_OPTION_OBJECTSROWS  = 0x00000080, // each row of values should be an object, not an array
    RRDR_OPTION_GOOGLE_JSON  = 0x00000100, // comply with google JSON/JSONP specs
    RRDR_OPTION_JSON_WRAP    = 0x00000200, // wrap the response in a JSON header with info about the result
    RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
    RRDR_OPTION_PERCENTAGE   = 0x00000800, // give values as percentage of total
    RRDR_OPTION_NOT_ALIGNED  = 0x00001000, // do not align charts for persistent timeframes
    RRDR_OPTION_DISPLAY_ABS  = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
    RRDR_OPTION_MATCH_IDS    = 0x00004000, // when filtering dimensions, match only IDs
    RRDR_OPTION_MATCH_NAMES  = 0x00008000, // when filtering dimensions, match only names
} RRDR_OPTIONS;

typedef enum rrdr_value_flag {
    RRDR_VALUE_NOTHING = 0x00, // no flag set
    RRDR_VALUE_EMPTY   = 0x01, // the value is empty
    RRDR_VALUE_RESET   = 0x02, // the value has been reset
} RRDR_VALUE_FLAGS;

typedef enum rrdr_dimension_flag {
    RRDR_DIMENSION_HIDDEN   = 0x04, // the dimension is hidden
    RRDR_DIMENSION_NONZERO  = 0x08, // the dimension is non-zero
    RRDR_DIMENSION_SELECTED = 0x10, // the dimension is selected
} RRDR_DIMENSION_FLAGS;

// RRDR result options
typedef enum rrdr_result_flags {
    RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001,
    RRDR_RESULT_OPTION_RELATIVE = 0x00000002,
} RRDR_RESULT_FLAGS;

typedef struct rrdresult {
    RRDSET *st;                       // the chart this result refers to

    RRDR_RESULT_FLAGS result_options; // RRDR_RESULT_OPTION_*

    int d;                            // the number of dimensions
    long n;                           // the number of values in the arrays
    long rows;                        // the number of rows used

    RRDR_DIMENSION_FLAGS *od;         // the options for the dimensions

    time_t *t;                        // array of n timestamps
    calculated_number *v;             // array of n x d values
    RRDR_VALUE_FLAGS *o;              // array of n x d options for each value returned

    long group;                       // how many collected values were grouped for each row
    int update_every;                 // what is the suggested update frequency in seconds

    calculated_number min;
    calculated_number max;

    time_t before;
    time_t after;

    int has_st_lock;                  // if st is read locked by us

    // internal rrd2rrdr() members below this point
    long group_points;
    calculated_number group_sum_divisor;

    void *(*grouping_init)(struct rrdresult *r);
    void (*grouping_reset)(struct rrdresult *r);
    void (*grouping_free)(struct rrdresult *r);
    void (*grouping_add)(struct rrdresult *r, calculated_number value);
    calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
    void *grouping_data;

#ifdef NETDATA_INTERNAL_CHECKS
    const char *log;
#endif
} RRDR;

#define rrdr_rows(r) ((r)->rows)

// formatters
extern void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable);
extern void rrdr2csv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines);
extern calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null);
extern void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix);

extern void rrdr_free(RRDR *r);
extern RRDR *rrdr_create(RRDSET *st, long n);

#include "web/api/queries/query.h"

extern RRDR *rrd2rrdr(RRDSET *st, long points_requested, long long after_requested, long long before_requested, RRDR_GROUPING group_method, long group_time_requested, RRDR_OPTIONS options, const char *dimensions);

#include "query.h"

#endif //NETDATA_QUERIES_RRDR_H
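The grouping_* function pointers are the seam the modular grouping methods (average, min, max, sum, incremental_sum, median, stddev, ses) plug into: grouping_init() runs once per query, grouping_reset() between dimensions, and per output point the engine feeds every collected value to grouping_add() and then asks grouping_flush() for the row's value. A rough sketch of that per-point contract, an illustration only and not the actual query.c code, which also handles alignment, gaps, counter resets and group_sum_divisor:

    // simplified sketch: produce one output value for one dimension
    static calculated_number example_emit_one_point(RRDR *r, calculated_number *collected, long n) {
        RRDR_VALUE_FLAGS flags = RRDR_VALUE_NOTHING;

        long i;
        for(i = 0; i < n; i++)
            r->grouping_add(r, collected[i]);               // feed r->group collected values

        calculated_number v = r->grouping_flush(r, &flags); // get one grouped value back
        return (flags & RRDR_VALUE_EMPTY) ? NAN : v;        // empty groups become gaps
    }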
web/api/queries/ses/Makefile.am (new file, 8 lines)

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

web/api/queries/ses/README.md (new file, 1 line)

@@ -0,0 +1 @@
# single exponential smoothing
web/api/queries/ses/ses.c (new file, 74 lines)

@@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "ses.h"

// ----------------------------------------------------------------------------
// single exponential smoothing

struct grouping_ses {
    calculated_number alpha;
    calculated_number alpha_older;
    calculated_number level;
    size_t count;
    size_t has_data;
};

static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
    g->alpha = 1.0 / r->group;
    g->alpha_older = 1 - g->alpha;
}

void *grouping_init_ses(RRDR *r) {
    struct grouping_ses *g = (struct grouping_ses *)callocz(1, sizeof(struct grouping_ses));
    set_alpha(r, g);
    g->level = 0.0;
    return g;
}

// resets when the query switches dimensions
// so, clear everything to restart
void grouping_reset_ses(RRDR *r) {
    struct grouping_ses *g = (struct grouping_ses *)r->grouping_data;
    g->level = 0.0;
    g->count = 0;
    g->has_data = 0;
}

void grouping_free_ses(RRDR *r) {
    freez(r->grouping_data);
    r->grouping_data = NULL;
}

void grouping_add_ses(RRDR *r, calculated_number value) {
    struct grouping_ses *g = (struct grouping_ses *)r->grouping_data;

    if(isnormal(value)) {
        // the first value seen becomes the initial level
        if(unlikely(!g->has_data)) {
            g->level = value;
            g->has_data = 1;
        }

        g->level = g->alpha * value + g->alpha_older * g->level;

        g->count++;
    }
}

calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_ses *g = (struct grouping_ses *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->count || !isnormal(g->level))) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        value = g->level;
    }

    g->count = 0;

    return value;
}
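The grouped value is therefore the classic SES recurrence level = alpha * value + (1 - alpha) * level, seeded with the first value of the group and with alpha = 1 / group. A standalone arithmetic illustration (plain C, not netdata code):

    #include <stdio.h>

    // demo of the recurrence above with group = 4, i.e. alpha = 0.25
    int main(void) {
        const double alpha = 0.25;
        const double values[4] = { 2.0, 10.0, 10.0, 10.0 };
        double level = 0.0;
        int has_data = 0;

        for(int i = 0; i < 4; i++) {
            if(!has_data) { level = values[i]; has_data = 1; }
            level = alpha * values[i] + (1.0 - alpha) * level;
        }

        // prints 6.6250: the carried-over level keeps the early low sample visible,
        // while a plain average of the same four values would be 8.0000
        printf("%.4f\n", level);
        return 0;
    }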
web/api/queries/ses/ses.h (new file, 15 lines)

@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERIES_SES_H
#define NETDATA_API_QUERIES_SES_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_ses(RRDR *r);
extern void grouping_reset_ses(RRDR *r);
extern void grouping_free_ses(RRDR *r);
extern void grouping_add_ses(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERIES_SES_H
web/api/queries/stddev/Makefile.am (new file, 8 lines)

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

web/api/queries/stddev/README.md (new file, empty)
web/api/queries/stddev/stddev.c (new file, 73 lines)

@@ -0,0 +1,73 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "stddev.h"

// ----------------------------------------------------------------------------
// stddev

struct grouping_stddev {
    size_t series_size;
    size_t next_pos;

    LONG_DOUBLE series[];
};

void *grouping_init_stddev(RRDR *r) {
    long entries = (r->group > r->group_points) ? r->group : r->group_points;
    if(entries < 0) entries = 0;

    struct grouping_stddev *g = (struct grouping_stddev *)callocz(1, sizeof(struct grouping_stddev) + entries * sizeof(LONG_DOUBLE));
    g->series_size = (size_t)entries;

    return g;
}

// resets when the query switches dimensions
// so, clear everything to restart
void grouping_reset_stddev(RRDR *r) {
    struct grouping_stddev *g = (struct grouping_stddev *)r->grouping_data;
    g->next_pos = 0;
}

void grouping_free_stddev(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_stddev(RRDR *r, calculated_number value) {
    struct grouping_stddev *g = (struct grouping_stddev *)r->grouping_data;

    if(unlikely(g->next_pos >= g->series_size)) {
        error("INTERNAL ERROR: stddev buffer overflow on chart '%s' - next_pos = %zu, series_size = %zu, r->group = %ld, r->group_points = %ld.", r->st->name, g->next_pos, g->series_size, r->group, r->group_points);
    }
    else {
        if(isnormal(value))
            g->series[g->next_pos++] = (LONG_DOUBLE)value;
    }
}

calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_stddev *g = (struct grouping_stddev *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->next_pos)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        value = standard_deviation(g->series, g->next_pos);

        if(!isnormal(value)) {
            value = 0.0;
            *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
        }

        //log_series_to_stderr(g->series, g->next_pos, value, "stddev");
    }

    g->next_pos = 0;

    return value;
}
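standard_deviation() is provided elsewhere in this changeset, not in this file, so the exact variant it computes is not visible here; the usual definition is the square root of the averaged squared deviations from the mean. A standalone sketch of that formula, assuming the population form (divide by n) purely for illustration:

    #include <math.h>
    #include <stddef.h>

    // illustration only: population standard deviation of n values
    static double example_stddev(const double *series, size_t n) {
        if(!n) return 0.0;

        double sum = 0.0;
        for(size_t i = 0; i < n; i++) sum += series[i];
        double mean = sum / (double)n;

        double sq = 0.0;
        for(size_t i = 0; i < n; i++) {
            double d = series[i] - mean;
            sq += d * d;
        }
        return sqrt(sq / (double)n);
    }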
web/api/queries/stddev/stddev.h (new file, 15 lines)

@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERIES_STDDEV_H
#define NETDATA_API_QUERIES_STDDEV_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_stddev(RRDR *r);
extern void grouping_reset_stddev(RRDR *r);
extern void grouping_free_stddev(RRDR *r);
extern void grouping_add_stddev(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERIES_STDDEV_H
web/api/queries/sum/Makefile.am (new file, 8 lines)

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later

AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in

dist_noinst_DATA = \
    README.md \
    $(NULL)

web/api/queries/sum/README.md (new file, empty)
web/api/queries/sum/sum.c (new file, 60 lines)

@@ -0,0 +1,60 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#include "sum.h"

// ----------------------------------------------------------------------------
// sum

struct grouping_sum {
    calculated_number sum;
    size_t count;
};

void *grouping_init_sum(RRDR *r) {
    (void)r;
    return callocz(1, sizeof(struct grouping_sum));
}

// resets when the query switches dimensions
// so, clear everything to restart
void grouping_reset_sum(RRDR *r) {
    struct grouping_sum *g = (struct grouping_sum *)r->grouping_data;
    g->sum = 0;
    g->count = 0;
}

void grouping_free_sum(RRDR *r) {
    freez(r->grouping_data);
}

void grouping_add_sum(RRDR *r, calculated_number value) {
    if(!isnan(value)) {
        struct grouping_sum *g = (struct grouping_sum *)r->grouping_data;

        // accumulate every collected value of the group
        g->sum += value;
        g->count++;
    }
}

calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
    struct grouping_sum *g = (struct grouping_sum *)r->grouping_data;

    calculated_number value;

    if(unlikely(!g->count)) {
        value = 0.0;
        *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
    }
    else {
        value = g->sum;
    }

    g->sum = 0.0;
    g->count = 0;

    return value;
}
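With the grouping methods now pluggable, the group= parameter of the data API selects them by name. For example (chart id and window are illustrative): /api/v1/data?chart=system.cpu&after=-600&points=20&group=sum versus group=average over the same window, and, assuming the new methods are registered under these names by the query engine, group=median, group=stddev or group=ses.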
web/api/queries/sum/sum.h (new file, 15 lines)

@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_API_QUERY_SUM_H
#define NETDATA_API_QUERY_SUM_H

#include "../query.h"
#include "../rrdr.h"

extern void *grouping_init_sum(RRDR *r);
extern void grouping_reset_sum(RRDR *r);
extern void grouping_free_sum(RRDR *r);
extern void grouping_add_sum(RRDR *r, calculated_number value);
extern calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);

#endif //NETDATA_API_QUERY_SUM_H
web/api/rrd2json.c (1546 lines changed) — file diff suppressed because it is too large.

web/api/rrd2json.h

@@ -4,6 +4,8 @@
#define NETDATA_RRD2JSON_H 1

#include "web_api_v1.h"
#include "exporters/allmetrics.h"
#include "queries/rrdr.h"

#define HOSTNAME_MAX 1024

@@ -35,46 +37,9 @@
#define DATASOURCE_FORMAT_SSV_COMMA "ssvcomma"
#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray"

#define ALLMETRICS_FORMAT_SHELL "shell"
#define ALLMETRICS_FORMAT_PROMETHEUS "prometheus"
#define ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "prometheus_all_hosts"
#define ALLMETRICS_FORMAT_JSON "json"

#define ALLMETRICS_SHELL 1
#define ALLMETRICS_PROMETHEUS 2
#define ALLMETRICS_JSON 3
#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4

#define GROUP_UNDEFINED 0
#define GROUP_AVERAGE 1
#define GROUP_MIN 2
#define GROUP_MAX 3
#define GROUP_SUM 4
#define GROUP_INCREMENTAL_SUM 5

#define RRDR_OPTION_NONZERO 0x00000001 // don't output dimensions will just zero values
#define RRDR_OPTION_REVERSED 0x00000002 // output the rows in reverse order (oldest to newest)
#define RRDR_OPTION_ABSOLUTE 0x00000004 // values positive, for DATASOURCE_SSV before summing
#define RRDR_OPTION_MIN2MAX 0x00000008 // when adding dimensions, use max - min, instead of sum
#define RRDR_OPTION_SECONDS 0x00000010 // output seconds, instead of dates
#define RRDR_OPTION_MILLISECONDS 0x00000020 // output milliseconds, instead of dates
#define RRDR_OPTION_NULL2ZERO 0x00000040 // do not show nulls, convert them to zeros
#define RRDR_OPTION_OBJECTSROWS 0x00000080 // each row of values should be an object, not an array
#define RRDR_OPTION_GOOGLE_JSON 0x00000100 // comply with google JSON/JSONP specs
#define RRDR_OPTION_JSON_WRAP 0x00000200 // wrap the response in a JSON header with info about the result
#define RRDR_OPTION_LABEL_QUOTES 0x00000400 // in CSV output, wrap header labels in double quotes
#define RRDR_OPTION_PERCENTAGE 0x00000800 // give values as percentage of total
#define RRDR_OPTION_NOT_ALIGNED 0x00001000 // do not align charts for persistant timeframes
#define RRDR_OPTION_DISPLAY_ABS 0x00002000 // for badges, display the absolute value, but calculate colors with sign
#define RRDR_OPTION_MATCH_IDS 0x00004000 // when filtering dimensions, match only IDs
#define RRDR_OPTION_MATCH_NAMES 0x00008000 // when filtering dimensions, match only names

extern void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb);
extern void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb);

extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, BUFFER *wb);
extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, BUFFER *wb);

extern int rrdset2anything_api_v1(
    RRDSET *st
    , BUFFER *wb
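The blocks removed above are not dropped from the codebase: judging by the two new includes and the commit description, the ALLMETRICS_* constants move with the exporters (exporters/allmetrics.h), while the GROUP_* and RRDR_OPTION_* values are replaced by the RRDR_GROUPING and RRDR_OPTIONS enums in the new queries/query.h and queries/rrdr.h, so rrd2json.h now only aggregates those headers.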
@ -5,21 +5,7 @@
|
|||
static struct {
|
||||
const char *name;
|
||||
uint32_t hash;
|
||||
int value;
|
||||
} api_v1_data_groups[] = {
|
||||
{ "average" , 0 , GROUP_AVERAGE}
|
||||
, {"min" , 0 , GROUP_MIN}
|
||||
, {"max" , 0 , GROUP_MAX}
|
||||
, {"sum" , 0 , GROUP_SUM}
|
||||
, {"incremental_sum", 0 , GROUP_INCREMENTAL_SUM}
|
||||
, {"incremental-sum", 0 , GROUP_INCREMENTAL_SUM}
|
||||
, { NULL, 0, 0}
|
||||
};
|
||||
|
||||
static struct {
|
||||
const char *name;
|
||||
uint32_t hash;
|
||||
uint32_t value;
|
||||
RRDR_OPTIONS value;
|
||||
} api_v1_data_options[] = {
|
||||
{ "nonzero" , 0 , RRDR_OPTION_NONZERO}
|
||||
, {"flip" , 0 , RRDR_OPTION_REVERSED}
|
||||
|
@ -86,9 +72,6 @@ static struct {
|
|||
void web_client_api_v1_init(void) {
|
||||
int i;
|
||||
|
||||
for(i = 0; api_v1_data_groups[i].name ; i++)
|
||||
api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
|
||||
|
||||
for(i = 0; api_v1_data_options[i].name ; i++)
|
||||
api_v1_data_options[i].hash = simple_hash(api_v1_data_options[i].name);
|
||||
|
||||
|
@ -97,17 +80,8 @@ void web_client_api_v1_init(void) {
|
|||
|
||||
for(i = 0; api_v1_data_google_formats[i].name ; i++)
|
||||
api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name);
|
||||
}
|
||||
|
||||
inline int web_client_api_request_v1_data_group(char *name, int def) {
|
||||
int i;
|
||||
|
||||
uint32_t hash = simple_hash(name);
|
||||
for(i = 0; api_v1_data_groups[i].name ; i++)
|
||||
if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
|
||||
return api_v1_data_groups[i].value;
|
||||
|
||||
return def;
|
||||
web_client_api_v1_init_grouping();
|
||||
}
|
||||
|
||||
inline uint32_t web_client_api_request_v1_data_options(char *o) {
|
||||
|
@ -254,373 +228,10 @@ inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w,
|
|||
return 200;
|
||||
}
|
||||
|
||||
struct prometheus_output_options {
|
||||
char *name;
|
||||
PROMETHEUS_OUTPUT_OPTIONS flag;
|
||||
} prometheus_output_flags_root[] = {
|
||||
{ "help", PROMETHEUS_OUTPUT_HELP },
|
||||
{ "types", PROMETHEUS_OUTPUT_TYPES },
|
||||
{ "names", PROMETHEUS_OUTPUT_NAMES },
|
||||
{ "timestamps", PROMETHEUS_OUTPUT_TIMESTAMPS },
|
||||
{ "variables", PROMETHEUS_OUTPUT_VARIABLES },
|
||||
|
||||
// terminator
|
||||
{ NULL, PROMETHEUS_OUTPUT_NONE },
|
||||
};
|
||||
|
||||
inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url) {
|
||||
int format = ALLMETRICS_SHELL;
|
||||
const char *prometheus_server = w->client_ip;
|
||||
uint32_t prometheus_backend_options = global_backend_options;
|
||||
PROMETHEUS_OUTPUT_OPTIONS prometheus_output_options = PROMETHEUS_OUTPUT_TIMESTAMPS | ((global_backend_options & BACKEND_OPTION_SEND_NAMES)?PROMETHEUS_OUTPUT_NAMES:0);
|
||||
const char *prometheus_prefix = global_backend_prefix;
|
||||
|
||||
while(url) {
|
||||
char *value = mystrsep(&url, "?&");
|
||||
if (!value || !*value) continue;
|
||||
|
||||
char *name = mystrsep(&value, "=");
|
||||
if(!name || !*name) continue;
|
||||
if(!value || !*value) continue;
|
||||
|
||||
if(!strcmp(name, "format")) {
|
||||
if(!strcmp(value, ALLMETRICS_FORMAT_SHELL))
|
||||
format = ALLMETRICS_SHELL;
|
||||
else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS))
|
||||
format = ALLMETRICS_PROMETHEUS;
|
||||
else if(!strcmp(value, ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS))
|
||||
format = ALLMETRICS_PROMETHEUS_ALL_HOSTS;
|
||||
else if(!strcmp(value, ALLMETRICS_FORMAT_JSON))
|
||||
format = ALLMETRICS_JSON;
|
||||
else
|
||||
format = 0;
|
||||
}
|
||||
else if(!strcmp(name, "server")) {
|
||||
prometheus_server = value;
|
||||
}
|
||||
else if(!strcmp(name, "prefix")) {
|
||||
prometheus_prefix = value;
|
||||
}
|
||||
else if(!strcmp(name, "data") || !strcmp(name, "source") || !strcmp(name, "data source") || !strcmp(name, "data-source") || !strcmp(name, "data_source") || !strcmp(name, "datasource")) {
|
||||
prometheus_backend_options = backend_parse_data_source(value, prometheus_backend_options);
|
||||
}
|
||||
else {
|
||||
int i;
|
||||
for(i = 0; prometheus_output_flags_root[i].name ; i++) {
|
||||
if(!strcmp(name, prometheus_output_flags_root[i].name)) {
|
||||
if(!strcmp(value, "yes") || !strcmp(value, "1") || !strcmp(value, "true"))
|
||||
prometheus_output_options |= prometheus_output_flags_root[i].flag;
|
||||
else
|
||||
prometheus_output_options &= ~prometheus_output_flags_root[i].flag;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buffer_flush(w->response.data);
|
||||
buffer_no_cacheable(w->response.data);
|
||||
|
||||
switch(format) {
|
||||
case ALLMETRICS_JSON:
|
||||
w->response.data->contenttype = CT_APPLICATION_JSON;
|
||||
rrd_stats_api_v1_charts_allmetrics_json(host, w->response.data);
|
||||
return 200;
|
||||
|
||||
case ALLMETRICS_SHELL:
|
||||
w->response.data->contenttype = CT_TEXT_PLAIN;
|
||||
rrd_stats_api_v1_charts_allmetrics_shell(host, w->response.data);
|
||||
return 200;
|
||||
|
||||
case ALLMETRICS_PROMETHEUS:
|
||||
w->response.data->contenttype = CT_PROMETHEUS;
|
||||
rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
|
||||
host
|
||||
, w->response.data
|
||||
, prometheus_server
|
||||
, prometheus_prefix
|
||||
, prometheus_backend_options
|
||||
, prometheus_output_options
|
||||
);
|
||||
return 200;
|
||||
|
||||
case ALLMETRICS_PROMETHEUS_ALL_HOSTS:
|
||||
w->response.data->contenttype = CT_PROMETHEUS;
|
||||
rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
|
||||
host
|
||||
, w->response.data
|
||||
, prometheus_server
|
||||
, prometheus_prefix
|
||||
, prometheus_backend_options
|
||||
, prometheus_output_options
|
||||
);
|
||||
return 200;
|
||||
|
||||
default:
|
||||
w->response.data->contenttype = CT_TEXT_PLAIN;
|
||||
buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
|
||||
return 400;
|
||||
}
|
||||
}
|
||||
|
||||
inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url) {
|
||||
return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart);
|
||||
}
|
||||
|
||||
int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url) {
|
||||
int ret = 400;
|
||||
buffer_flush(w->response.data);
|
||||
|
||||
BUFFER *dimensions = NULL;
|
||||
|
||||
const char *chart = NULL
|
||||
, *before_str = NULL
|
||||
, *after_str = NULL
|
||||
, *points_str = NULL
|
||||
, *multiply_str = NULL
|
||||
, *divide_str = NULL
|
||||
, *label = NULL
|
||||
, *units = NULL
|
||||
, *label_color = NULL
|
||||
, *value_color = NULL
|
||||
, *refresh_str = NULL
|
||||
, *precision_str = NULL
|
||||
, *scale_str = NULL
|
||||
, *alarm = NULL;
|
||||
|
||||
int group = GROUP_AVERAGE;
|
||||
uint32_t options = 0x00000000;
|
||||
|
||||
while(url) {
|
||||
char *value = mystrsep(&url, "/?&");
|
||||
if(!value || !*value) continue;
|
||||
|
||||
char *name = mystrsep(&value, "=");
|
||||
if(!name || !*name) continue;
|
||||
if(!value || !*value) continue;
|
||||
|
||||
debug(D_WEB_CLIENT, "%llu: API v1 badge.svg query param '%s' with value '%s'", w->id, name, value);
|
||||
|
||||
// name and value are now the parameters
|
||||
// they are not null and not empty
|
||||
|
||||
if(!strcmp(name, "chart")) chart = value;
|
||||
else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
|
||||
if(!dimensions)
|
||||
dimensions = buffer_create(100);
|
||||
|
||||
buffer_strcat(dimensions, "|");
|
||||
buffer_strcat(dimensions, value);
|
||||
}
|
||||
else if(!strcmp(name, "after")) after_str = value;
|
||||
else if(!strcmp(name, "before")) before_str = value;
|
||||
else if(!strcmp(name, "points")) points_str = value;
|
||||
else if(!strcmp(name, "group")) {
|
||||
group = web_client_api_request_v1_data_group(value, GROUP_AVERAGE);
|
||||
}
|
||||
else if(!strcmp(name, "options")) {
|
||||
options |= web_client_api_request_v1_data_options(value);
|
||||
}
|
||||
else if(!strcmp(name, "label")) label = value;
|
||||
else if(!strcmp(name, "units")) units = value;
|
||||
else if(!strcmp(name, "label_color")) label_color = value;
|
||||
else if(!strcmp(name, "value_color")) value_color = value;
|
||||
else if(!strcmp(name, "multiply")) multiply_str = value;
|
||||
else if(!strcmp(name, "divide")) divide_str = value;
|
||||
else if(!strcmp(name, "refresh")) refresh_str = value;
|
||||
else if(!strcmp(name, "precision")) precision_str = value;
|
||||
else if(!strcmp(name, "scale")) scale_str = value;
|
||||
else if(!strcmp(name, "alarm")) alarm = value;
|
||||
}
|
||||
|
||||
if(!chart || !*chart) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
buffer_sprintf(w->response.data, "No chart id is given at the request.");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
int scale = (scale_str && *scale_str)?str2i(scale_str):100;
|
||||
|
||||
RRDSET *st = rrdset_find(host, chart);
|
||||
if(!st) st = rrdset_find_byname(host, chart);
|
||||
if(!st) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
buffer_svg(w->response.data, "chart not found", NAN, "", NULL, NULL, -1, scale, 0);
|
||||
ret = 200;
|
||||
goto cleanup;
|
||||
}
|
||||
st->last_accessed_time = now_realtime_sec();
|
||||
|
||||
RRDCALC *rc = NULL;
|
||||
if(alarm) {
|
||||
rc = rrdcalc_find(st, alarm);
|
||||
if (!rc) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0);
|
||||
ret = 200;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
long long multiply = (multiply_str && *multiply_str )?str2l(multiply_str):1;
|
||||
long long divide = (divide_str && *divide_str )?str2l(divide_str):1;
|
||||
long long before = (before_str && *before_str )?str2l(before_str):0;
|
||||
long long after = (after_str && *after_str )?str2l(after_str):-st->update_every;
|
||||
int points = (points_str && *points_str )?str2i(points_str):1;
|
||||
int precision = (precision_str && *precision_str)?str2i(precision_str):-1;
|
||||
|
||||
if(!multiply) multiply = 1;
|
||||
if(!divide) divide = 1;
|
||||
|
||||
int refresh = 0;
|
||||
if(refresh_str && *refresh_str) {
|
||||
if(!strcmp(refresh_str, "auto")) {
|
||||
if(rc) refresh = rc->update_every;
|
||||
else if(options & RRDR_OPTION_NOT_ALIGNED)
|
||||
refresh = st->update_every;
|
||||
else {
|
||||
refresh = (int)(before - after);
|
||||
if(refresh < 0) refresh = -refresh;
|
||||
}
|
||||
}
|
||||
else {
|
||||
refresh = str2i(refresh_str);
|
||||
if(refresh < 0) refresh = -refresh;
|
||||
}
|
||||
}
|
||||
|
||||
if(!label) {
|
||||
if(alarm) {
|
||||
char *s = (char *)alarm;
|
||||
while(*s) {
|
||||
if(*s == '_') *s = ' ';
|
||||
s++;
|
||||
}
|
||||
label = alarm;
|
||||
}
|
||||
else if(dimensions) {
|
||||
const char *dim = buffer_tostring(dimensions);
|
||||
if(*dim == '|') dim++;
|
||||
label = dim;
|
||||
}
|
||||
else
|
||||
label = st->name;
|
||||
}
|
||||
if(!units) {
|
||||
if(alarm) {
|
||||
if(rc->units)
|
||||
units = rc->units;
|
||||
else
|
||||
units = "";
|
||||
}
|
||||
else if(options & RRDR_OPTION_PERCENTAGE)
|
||||
units = "%";
|
||||
else
|
||||
units = st->units;
|
||||
}
|
||||
|
||||
debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'"
|
||||
, w->id
|
||||
, chart
|
||||
, alarm?alarm:""
|
||||
, (dimensions)?buffer_tostring(dimensions):""
|
||||
, after
|
||||
, before
|
||||
, points
|
||||
, group
|
||||
, options
|
||||
);
|
||||
|
||||
if(rc) {
|
||||
if (refresh > 0) {
|
||||
buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
|
||||
w->response.data->expires = now_realtime_sec() + refresh;
|
||||
}
|
||||
else buffer_no_cacheable(w->response.data);
|
||||
|
||||
if(!value_color) {
|
||||
switch(rc->status) {
|
||||
case RRDCALC_STATUS_CRITICAL:
|
||||
value_color = "red";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_WARNING:
|
||||
value_color = "orange";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_CLEAR:
|
||||
value_color = "brightgreen";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_UNDEFINED:
|
||||
value_color = "lightgrey";
|
||||
break;
|
||||
|
||||
case RRDCALC_STATUS_UNINITIALIZED:
|
||||
value_color = "#000";
|
||||
break;
|
||||
|
||||
default:
|
||||
value_color = "grey";
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
buffer_svg(w->response.data,
|
||||
label,
|
||||
(isnan(rc->value)||isinf(rc->value)) ? rc->value : rc->value * multiply / divide,
|
||||
units,
|
||||
label_color,
|
||||
value_color,
|
||||
precision,
|
||||
scale,
|
||||
options
|
||||
);
|
||||
ret = 200;
|
||||
}
|
||||
else {
|
||||
time_t latest_timestamp = 0;
|
||||
int value_is_null = 1;
|
||||
calculated_number n = NAN;
|
||||
ret = 500;
|
||||
|
||||
// if the collected value is too old, don't calculate its value
|
||||
if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above)))
|
||||
ret = rrdset2value_api_v1(st, w->response.data, &n, (dimensions) ? buffer_tostring(dimensions) : NULL
|
||||
, points, after, before, group, 0, options, NULL, &latest_timestamp, &value_is_null);
|
||||
|
||||
// if the value cannot be calculated, show empty badge
|
||||
if (ret != 200) {
|
||||
buffer_no_cacheable(w->response.data);
|
||||
value_is_null = 1;
|
||||
n = 0;
|
||||
ret = 200;
|
||||
}
|
||||
else if (refresh > 0) {
|
||||
buffer_sprintf(w->response.header, "Refresh: %d\r\n", refresh);
|
||||
w->response.data->expires = now_realtime_sec() + refresh;
|
||||
}
|
||||
else buffer_no_cacheable(w->response.data);
|
||||
|
||||
// render the badge
|
||||
buffer_svg(w->response.data,
|
||||
label,
|
||||
(value_is_null)?NAN:(n * multiply / divide),
|
||||
units,
|
||||
label_color,
|
||||
value_color,
|
||||
precision,
|
||||
scale,
|
||||
options
|
||||
);
|
||||
}
|
||||
|
||||
cleanup:
|
||||
buffer_free(dimensions);
|
||||
return ret;
|
||||
}
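A representative badge request handled by this function (chart, dimension and window are illustrative): /api/v1/badge.svg?chart=system.cpu&dimensions=user&after=-60&group=average&label=cpu+user&refresh=auto — every query parameter name used here (chart, dimensions, after, before, points, group, options, label, units, label_color, value_color, multiply, divide, refresh, precision, scale, alarm) is one of those parsed at the top of the function.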
|
||||
|
||||
// returns the HTTP code
|
||||
inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) {
|
||||
debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url);
|
||||
|
@ -645,7 +256,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
|
|||
, *group_time_str = NULL
|
||||
, *points_str = NULL;
|
||||
|
||||
int group = GROUP_AVERAGE;
|
||||
int group = RRDR_GROUPING_AVERAGE;
|
||||
uint32_t format = DATASOURCE_JSON;
|
||||
uint32_t options = 0x00000000;
|
||||
|
||||
|
@ -673,7 +284,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
|
|||
else if(!strcmp(name, "points")) points_str = value;
|
||||
else if(!strcmp(name, "gtime")) group_time_str = value;
|
||||
else if(!strcmp(name, "group")) {
|
||||
group = web_client_api_request_v1_data_group(value, GROUP_AVERAGE);
|
||||
group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
|
||||
}
|
||||
else if(!strcmp(name, "format")) {
|
||||
format = web_client_api_request_v1_data_format(value);
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
#include "web/api/badges/web_buffer_svg.h"
|
||||
#include "rrd2json.h"
|
||||
|
||||
extern int web_client_api_request_v1_data_group(char *name, int def);
|
||||
extern uint32_t web_client_api_request_v1_data_options(char *o);
|
||||
extern uint32_t web_client_api_request_v1_data_format(char *name);
|
||||
extern uint32_t web_client_api_request_v1_data_google_format(char *name);
|
||||
|
@ -17,9 +16,7 @@ extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client
|
|||
extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
|
||||
extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
|
||||
extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
|
||||
|
|
|
@ -23,8 +23,6 @@ dist_web_DATA = \
|
|||
goto-host-from-alarm.html \
|
||||
index.html \
|
||||
infographic.html \
|
||||
netdata-swagger.yaml \
|
||||
netdata-swagger.json \
|
||||
robots.txt \
|
||||
refresh-badges.js \
|
||||
registry.html \
|
||||
|
|
|
@ -560,31 +560,6 @@ void buffer_data_options2string(BUFFER *wb, uint32_t options) {
|
|||
}
|
||||
}
|
||||
|
||||
const char *group_method2string(int group) {
|
||||
switch(group) {
|
||||
case GROUP_UNDEFINED:
|
||||
return "";
|
||||
|
||||
case GROUP_AVERAGE:
|
||||
return "average";
|
||||
|
||||
case GROUP_MIN:
|
||||
return "min";
|
||||
|
||||
case GROUP_MAX:
|
||||
return "max";
|
||||
|
||||
case GROUP_SUM:
|
||||
return "sum";
|
||||
|
||||
case GROUP_INCREMENTAL_SUM:
|
||||
return "incremental-sum";
|
||||
|
||||
default:
|
||||
return "unknown-group-method";
|
||||
}
|
||||
}
|
||||
|
||||
static inline int check_host_and_call(RRDHOST *host, struct web_client *w, char *url, int (*func)(RRDHOST *, struct web_client *, char *)) {
|
||||
if(unlikely(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)) {
|
||||
buffer_flush(w->response.data);
|
||||
|
|
|
@ -184,9 +184,6 @@ extern ssize_t web_client_read_file(struct web_client *w);
|
|||
extern void web_client_process_request(struct web_client *w);
|
||||
extern void web_client_request_done(struct web_client *w);
|
||||
|
||||
extern int web_client_api_request_v1_data_group(char *name, int def);
|
||||
extern const char *group_method2string(int group);
|
||||
|
||||
extern void buffer_data_options2string(BUFFER *wb, uint32_t options);
|
||||
|
||||
extern int mysendfile(struct web_client *w, char *filename);
|
||||
|
|
|
@ -3,7 +3,8 @@
|
|||
#ifndef NETDATA_WEB_CLIENT_CACHE_H
|
||||
#define NETDATA_WEB_CLIENT_CACHE_H
|
||||
|
||||
#include "web_server.h"
|
||||
#include "libnetdata/libnetdata.h"
|
||||
#include "web_client.h"
|
||||
|
||||
struct clients_cache {
|
||||
pid_t pid;
|
||||
|
@ -25,4 +26,6 @@ extern struct web_client *web_client_get_from_cache_or_allocate();
|
|||
extern void web_client_cache_destroy(void);
|
||||
extern void web_client_cache_verify(int force);
|
||||
|
||||
#include "web_server.h"
|
||||
|
||||
#endif //NETDATA_WEB_CLIENT_CACHE_H
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
#ifndef NETDATA_WEB_SERVER_H
|
||||
#define NETDATA_WEB_SERVER_H 1
|
||||
|
||||
#include "daemon/common.h"
|
||||
#include "libnetdata/libnetdata.h"
|
||||
#include "web_client.h"
|
||||
|
||||
#ifndef API_LISTEN_PORT
|
||||
|
@ -55,4 +55,6 @@ extern struct web_client *web_client_create_on_listenfd(int listener);
|
|||
#include "multi/multi-threaded.h"
|
||||
#include "static/static-threaded.h"
|
||||
|
||||
#include "daemon/common.h"
|
||||
|
||||
#endif /* NETDATA_WEB_SERVER_H */
|
||||
|
|