0
0
Fork 0
mirror of https://github.com/netdata/netdata.git synced 2025-04-24 21:24:12 +00:00

/api/v2 part 4 ()

* expose the order of group by

* key renames in json wrapper v2

* added group by context and group by units

* added view_average_values

* fix for view_average_values when percentage is specified

* option group-by-labels enables the exposure of all the labels that are used for each of the final grouped dimensions

* when executing group by queries, allocate one dimension data at a time - not all of them

* respect hidden dimensions

* cancel running data query on socket error

* use poll to detect socket errors

* use POLLRDHUP to detect half closed connections

* make sure POLLRDHUP is available

* do not destroy aral-by-size arals

* completed documentation of /api/v2/data.

* moved min, max back to view; updated swagger yaml and json

* default format for /api/v2/data is json2
This commit is contained in:
Costa Tsaousis 2023-03-13 23:39:06 +02:00 committed by GitHub
parent 8068c952d8
commit cd50bf4236
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
35 changed files with 2675 additions and 964 deletions

View file

@ -258,6 +258,8 @@ typedef struct query_metric {
#define MAX_QUERY_TARGET_ID_LENGTH 255
typedef bool (*interrupt_callback_t)(void *data);
typedef struct query_target_request {
size_t version;
@ -303,6 +305,9 @@ typedef struct query_target_request {
RRDR_GROUP_BY_FUNCTION group_by_aggregate_function;
usec_t received_ut;
interrupt_callback_t interrupt_callback;
void *interrupt_callback_data;
} QUERY_TARGET_REQUEST;
#define GROUP_BY_MAX_LABEL_KEYS 10

View file

@ -399,7 +399,7 @@ size_t text_sanitize(unsigned char *dst, const unsigned char *src, size_t dst_si
// find how big this character is (2-4 bytes)
size_t utf_character_size = 2;
while(utf_character_size <= 4 && src[utf_character_size] && IS_UTF8_BYTE(src[utf_character_size]) && !IS_UTF8_STARTBYTE(src[utf_character_size]))
while(utf_character_size < 4 && src[utf_character_size] && IS_UTF8_BYTE(src[utf_character_size]) && !IS_UTF8_STARTBYTE(src[utf_character_size]))
utf_character_size++;
if(utf) {

View file

@ -885,10 +885,10 @@ void aral_by_size_release(ARAL *ar) {
fatal("ARAL BY SIZE: double release detected");
aral_by_size_globals.array[size].refcount--;
if(!aral_by_size_globals.array[size].refcount) {
aral_destroy(aral_by_size_globals.array[size].ar);
aral_by_size_globals.array[size].ar = NULL;
}
// if(!aral_by_size_globals.array[size].refcount) {
// aral_destroy(aral_by_size_globals.array[size].ar);
// aral_by_size_globals.array[size].ar = NULL;
// }
netdata_spinlock_unlock(&aral_by_size_globals.spinlock);
}

View file

@ -3,6 +3,7 @@
#ifndef NETDATA_WEB_BUFFER_H
#define NETDATA_WEB_BUFFER_H 1
#include "../string/utf8.h"
#include "../libnetdata.h"
#define WEB_DATA_LENGTH_INCREASE_STEP 1024
@ -203,18 +204,56 @@ static inline void buffer_strcat(BUFFER *wb, const char *txt) {
static inline void buffer_json_strcat(BUFFER *wb, const char *txt) {
if(unlikely(!txt || !*txt)) return;
const char *t = txt;
const unsigned char *t = (const unsigned char *)txt;
while(*t) {
buffer_need_bytes(wb, 100);
char *s = &wb->buffer[wb->len];
char *d = s;
const char *e = &wb->buffer[wb->size - 1]; // remove 1 to make room for the escape character
buffer_need_bytes(wb, 110);
unsigned char *s = (unsigned char *)&wb->buffer[wb->len];
unsigned char *d = s;
const unsigned char *e = (unsigned char *)&wb->buffer[wb->size - 10]; // make room for the max escape sequence
while(*t && d < e) {
if(unlikely(*t == '\\' || *t == '\"'))
*d++ = '\\';
#ifdef BUFFER_JSON_ESCAPE_UTF
if(unlikely(IS_UTF8_STARTBYTE(*t) && IS_UTF8_BYTE(t[1]))) {
// UTF-8 multi-byte encoded character
*d++ = *t++;
// find how big this character is (2-4 bytes)
size_t utf_character_size = 2;
while(utf_character_size < 4 && t[utf_character_size] && IS_UTF8_BYTE(t[utf_character_size]) && !IS_UTF8_STARTBYTE(t[utf_character_size]))
utf_character_size++;
uint32_t code_point = 0;
for (size_t i = 0; i < utf_character_size; i++) {
code_point <<= 6;
code_point |= (t[i] & 0x3F);
}
t += utf_character_size;
// encode as \u escape sequence
*d++ = '\\';
*d++ = 'u';
*d++ = hex_digits[(code_point >> 12) & 0xf];
*d++ = hex_digits[(code_point >> 8) & 0xf];
*d++ = hex_digits[(code_point >> 4) & 0xf];
*d++ = hex_digits[code_point & 0xf];
}
else
#endif
if(unlikely(*t < ' ')) {
uint32_t v = *t++;
*d++ = '\\';
*d++ = 'u';
*d++ = hex_digits[(v >> 12) & 0xf];
*d++ = hex_digits[(v >> 8) & 0xf];
*d++ = hex_digits[(v >> 4) & 0xf];
*d++ = hex_digits[v & 0xf];
}
else {
if (unlikely(*t == '\\' || *t == '\"'))
*d++ = '\\';
*d++ = *t++;
}
}
wb->len += d - s;

View file

@ -1,5 +1,13 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef _GNU_SOURCE
#define _GNU_SOURCE // for POLLRDHUP
#endif
#ifndef __BSD_VISIBLE
#define __BSD_VISIBLE // for POLLRDHUP
#endif
#include "../libnetdata.h"
// --------------------------------------------------------------------------------------------------------------------
@ -11,6 +19,46 @@
#define LARGE_SOCK_SIZE 4096
#endif
// Returns true if the given file descriptor refers to a socket.
// getsockopt(SO_TYPE) fails (ENOTSOCK/EBADF) for anything that is not a socket.
bool fd_is_socket(int fd) {
    int sock_type = 0;
    socklen_t optlen = sizeof(sock_type);
    return getsockopt(fd, SOL_SOCKET, SO_TYPE, &sock_type, &optlen) != -1;
}
// Returns true when the socket looks broken for output:
// poll() reports an error/hangup condition, or the socket is not writable.
// Invalid fds and poll() failures are reported as "no error" (false),
// so callers never abort on a descriptor they cannot even poll.
bool sock_has_output_error(int fd) {
    if(fd < 0)
        return false;

    short int error_events = POLLERR | POLLHUP | POLLNVAL;

#ifdef POLLRDHUP
    // detect half-closed connections, where available
    error_events |= POLLRDHUP;
#endif

    struct pollfd pfd = {
        .fd = fd,
        .events = POLLOUT | error_events,
        .revents = 0,
    };

    // zero timeout: just sample the current socket state
    if(poll(&pfd, 1, 0) == -1)
        return false;

    if(pfd.revents & error_events)
        return true;

    return !(pfd.revents & POLLOUT);
}
int sock_setnonblock(int fd) {
int flags;

View file

@ -74,6 +74,9 @@ ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
#endif
bool fd_is_socket(int fd);
bool sock_has_output_error(int fd);
int sock_setnonblock(int fd);
int sock_delnonblock(int fd);
int sock_setreuse(int fd, int reuse);

View file

@ -3,7 +3,7 @@
#ifndef NETDATA_STRING_UTF8_H
#define NETDATA_STRING_UTF8_H 1
#define IS_UTF8_BYTE(x) (x & 0x80)
#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x)&&(x & 0x40))
#define IS_UTF8_BYTE(x) ((x) & 0x80)
#define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x)&&((x) & 0x40))
#endif /* NETDATA_STRING_UTF8_H */

View file

@ -154,7 +154,6 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
uint32_t *gbc = &r->gbc [ i * r->d ];
time_t now = r->t[i];
@ -211,15 +210,13 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
buffer_fast_strcat(wb, post_date, post_date_len);
}
if(unlikely((options & RRDR_OPTION_PERCENTAGE) && !(options & (RRDR_OPTION_INTERNAL_GBC|RRDR_OPTION_INTERNAL_AR)))) {
if(unlikely((options & RRDR_OPTION_PERCENTAGE) && !(options & (RRDR_OPTION_INTERNAL_AR)))) {
total = 0;
for(c = 0; c < used ;c++) {
if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
NETDATA_DOUBLE n;
if(unlikely(options & RRDR_OPTION_INTERNAL_GBC))
n = gbc[c];
else if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
n = ar[c];
else
n = cn[c];
@ -239,9 +236,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
continue;
NETDATA_DOUBLE n;
if(unlikely(options & RRDR_OPTION_INTERNAL_GBC))
n = gbc[c];
else if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
n = ar[c];
else
n = cn[c];
@ -251,7 +246,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
buffer_sprintf(wb, "%s%s%s: ", kq, string2str(r->dn[c]), kq);
if(co[c] & RRDR_VALUE_EMPTY && !(options & (RRDR_OPTION_INTERNAL_AR | RRDR_OPTION_INTERNAL_GBC))) {
if(co[c] & RRDR_VALUE_EMPTY && !(options & (RRDR_OPTION_INTERNAL_AR))) {
if(unlikely(options & RRDR_OPTION_NULL2ZERO))
buffer_fast_strcat(wb, "0", 1);
else
@ -261,7 +256,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
if(unlikely((options & RRDR_OPTION_PERCENTAGE) && !(options & (RRDR_OPTION_INTERNAL_GBC|RRDR_OPTION_INTERNAL_AR)))) {
if(unlikely((options & RRDR_OPTION_PERCENTAGE) && !(options & (RRDR_OPTION_INTERNAL_AR)))) {
n = n * 100 / total;
if(unlikely(i == start && c == 0)) {
@ -285,3 +280,132 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
buffer_strcat(wb, finish);
//info("RRD2JSON(): %s: END", r->st->id);
}
// Render the query result (RRDR) as the /api/v2 "result" object:
//   { "labels": [...], "point": {...}, "data": [ [time, [value, ar, pa, (count)], ...], ... ] }
// Also updates r->view.min / r->view.max from the values actually emitted.
void rrdr2json_v2(RRDR *r, BUFFER *wb) {
    QUERY_TARGET *qt = r->internal.qt;
    RRDR_OPTIONS options = qt->request.options;

    // when the query is aggregatable, each point also carries its group-by count
    bool expose_gbc = query_target_aggregatable(qt);

    buffer_json_member_add_object(wb, "result");

    // "labels": column names - "time" first, then one per exposed dimension
    buffer_json_member_add_array(wb, "labels");
    buffer_json_add_array_item_string(wb, "time");
    long d, i;
    const long used = (long)r->d;
    for(d = 0, i = 0; d < used ; d++) {
        if(!rrdr_dimension_should_be_exposed(r->od[d], options))
            continue;

        buffer_json_add_array_item_string(wb, string2str(r->dn[d]));
        i++; // number of exposed dimensions - zero means no data rows below
    }
    buffer_json_array_close(wb); // labels

    // "point": maps the index of each element inside a per-dimension tuple
    buffer_json_member_add_object(wb, "point");
    buffer_json_member_add_uint64(wb, "value", 0);
    buffer_json_member_add_uint64(wb, "ar", 1);  // anomaly rate
    buffer_json_member_add_uint64(wb, "pa", 2);  // point annotations (RRDR_VALUE_FLAGS)
    if(expose_gbc)
        buffer_json_member_add_uint64(wb, "count", 3); // group-by count
    buffer_json_object_close(wb);

    buffer_json_member_add_array(wb, "data");
    if(i) {
        long start = 0, end = rrdr_rows(r), step = 1;
        if (!(options & RRDR_OPTION_REVERSED)) {
            // emit rows newest-to-oldest unless REVERSED is requested
            start = rrdr_rows(r) - 1;
            end = -1;
            step = -1;
        }

        // for each line in the array
        for (i = start; i != end; i += step) {
            // per-row base pointers into the RRDR matrices
            NETDATA_DOUBLE *cn = &r->v[ i * r->d ];   // values
            RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ]; // value flags
            NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];  // anomaly rates
            uint32_t *gbc = &r->gbc [ i * r->d ];     // group-by counts

            time_t now = r->t[i];

            buffer_json_add_array_item_array(wb); // row

            if (options & RRDR_OPTION_MILLISECONDS)
                buffer_json_add_array_item_time_ms(wb, now); // the time
            else
                buffer_json_add_array_item_time_t(wb, now); // the time

            // for PERCENTAGE, first sum this row over all QUERIED dimensions
            // (not just the exposed ones) to build the denominator
            NETDATA_DOUBLE total = 1;
            if(unlikely((options & RRDR_OPTION_PERCENTAGE))) {
                total = 0;
                for(d = 0; d < used ; d++) {
                    if(unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED))) continue;

                    NETDATA_DOUBLE n = cn[d];

                    if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
                        n = -n;

                    total += n;
                }
                // prevent a division by zero
                if(total == 0) total = 1;
            }

            for (d = 0; d < used; d++) {
                if (!rrdr_dimension_should_be_exposed(r->od[d], options))
                    continue;

                RRDR_VALUE_FLAGS o = co[d];

                buffer_json_add_array_item_array(wb); // point

                // add the value
                NETDATA_DOUBLE n = cn[d];

                if(o & RRDR_VALUE_EMPTY) {
                    if (unlikely(options & RRDR_OPTION_NULL2ZERO))
                        buffer_json_add_array_item_double(wb, 0);
                    else
                        buffer_json_add_array_item_double(wb, NAN);
                }
                else {
                    if (unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
                        n = -n;

                    if (unlikely((options & RRDR_OPTION_PERCENTAGE))) {
                        n = n * 100 / total;
                    }

                    // track min/max of the values actually sent to the caller
                    if(unlikely(i == start && d == 0)) {
                        r->view.min = r->view.max = n;
                    }
                    else {
                        if (n < r->view.min) r->view.min = n;
                        if (n > r->view.max) r->view.max = n;
                    }

                    buffer_json_add_array_item_double(wb, n);
                }

                // add the anomaly
                buffer_json_add_array_item_double(wb, ar[d]);

                // add the point annotations
                buffer_json_add_array_item_uint64(wb, o);

                // add the count
                if(expose_gbc)
                    buffer_json_add_array_item_uint64(wb, gbc[d]);

                buffer_json_array_close(wb); // point
            }

            buffer_json_array_close(wb); // row
        }
    }

    buffer_json_array_close(wb); // data
    buffer_json_object_close(wb); // result
}

View file

@ -6,5 +6,6 @@
#include "../rrd2json.h"
void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable);
void rrdr2json_v2(RRDR *r, BUFFER *wb);
#endif //NETDATA_API_FORMATTER_JSON_H

View file

@ -676,7 +676,7 @@ static inline long query_target_metrics_latest_values(BUFFER *wb, const char *ke
return i;
}
static inline size_t rrdr_latest_values(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
static inline size_t rrdr_dimension_latest_values(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
size_t c, i;
buffer_json_member_add_array(wb, key);
@ -732,11 +732,54 @@ static inline size_t rrdr_latest_values(BUFFER *wb, const char *key, RRDR *r, RR
return i;
}
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options,
RRDR_TIME_GROUPING group_method)
{
// Adds to "wb" an array "key" with the average value of each exposed dimension.
// When the PERCENTAGE option is set, each average is expressed as a percentage
// of the sum of the averages of all QUERIED dimensions (not just exposed ones).
static inline void rrdr_dimension_average_values(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
    if(!r->dv)
        return; // no per-dimension averages were computed for this query

    buffer_json_member_add_array(wb, key);

    bool percentage = r->internal.qt->request.options & RRDR_OPTION_PERCENTAGE;
    NETDATA_DOUBLE total = 0;

    if(percentage) {
        for(size_t c = 0; c < r->d ; c++) {
            if(!(r->od[c] & RRDR_DIMENSION_QUERIED))
                continue;

            total += r->dv[c];
        }

        // prevent a division by zero (same guard as the data formatters)
        if(total == 0) total = 1;
    }

    for(size_t c = 0; c < r->d ; c++) {
        if(!rrdr_dimension_should_be_exposed(r->od[c], options))
            continue;

        if(percentage)
            buffer_json_add_array_item_double(wb, r->dv[c] * 100.0 / total);
        else
            buffer_json_add_array_item_double(wb, r->dv[c]);
    }

    buffer_json_array_close(wb);
}
// Appends a "key" object to wb with the per-phase query timings in
// milliseconds, stamping qt->timings.finished_ut (query is complete) first.
static void rrdr_timings_v12(BUFFER *wb, const char *key, RRDR *r) {
    QUERY_TARGET *qt = r->internal.qt;
    qt->timings.finished_ut = now_monotonic_usec();

    usec_t received = qt->timings.received_ut;
    usec_t prepped  = qt->timings.preprocessed_ut;
    usec_t queried  = qt->timings.executed_ut;
    usec_t grouped  = qt->timings.group_by_ut;
    usec_t finished = qt->timings.finished_ut;

    buffer_json_member_add_object(wb, key);
    buffer_json_member_add_double(wb, "prep_ms",     (NETDATA_DOUBLE)(prepped  - received) / USEC_PER_MS);
    buffer_json_member_add_double(wb, "query_ms",    (NETDATA_DOUBLE)(queried  - prepped)  / USEC_PER_MS);
    buffer_json_member_add_double(wb, "group_by_ms", (NETDATA_DOUBLE)(grouped  - queried)  / USEC_PER_MS);
    buffer_json_member_add_double(wb, "output_ms",   (NETDATA_DOUBLE)(finished - grouped)  / USEC_PER_MS);
    buffer_json_member_add_double(wb, "total_ms",    (NETDATA_DOUBLE)(finished - received) / USEC_PER_MS);
    buffer_json_object_close(wb);
}
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb) {
QUERY_TARGET *qt = r->internal.qt;
DATASOURCE_FORMAT format = qt->request.format;
RRDR_OPTIONS options = qt->request.options;
long rows = rrdr_rows(r);
char kq[2] = "", // key quote
@ -762,7 +805,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR
buffer_json_member_add_time_t(wb, "last_entry", qt->db.last_time_s);
buffer_json_member_add_time_t(wb, "after", r->view.after);
buffer_json_member_add_time_t(wb, "before", r->view.before);
buffer_json_member_add_string(wb, "group", time_grouping_tostring(group_method));
buffer_json_member_add_string(wb, "group", time_grouping_tostring(qt->request.time_group_method));
web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", r->view.options);
if(!rrdr_dimension_names(wb, "dimension_names", r, options))
@ -788,7 +831,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR
if(!query_target_metrics_latest_values(wb, "latest_values", r, options))
rows = 0;
size_t dimensions = rrdr_latest_values(wb, "view_latest_values", r, options);
size_t dimensions = rrdr_dimension_latest_values(wb, "view_latest_values", r, options);
if(!dimensions)
rows = 0;
@ -858,13 +901,53 @@ static void query_target_combined_chart_type(BUFFER *wb, QUERY_TARGET *qt, size_
buffer_json_member_add_string(wb, "chart_type", rrdset_type_name(rrdcontext_acquired_chart_type(qt->contexts.array[0].rca)));
}
static void rrdr_dimension_units_array_v2(BUFFER *wb, RRDR *r, RRDR_OPTIONS options) {
// Adds to "wb" an array "key" listing the group-by methods that shaped the
// final dimensions of this query ("selected", "dimension", "instance",
// "label:<key>", "node", "context", "units").
static void rrdr_grouped_by_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options __maybe_unused) {
    QUERY_TARGET *qt = r->internal.qt;

    buffer_json_member_add_array(wb, key);

    // "selected" collapses everything into one dimension and
    // takes precedence over all other group-by methods
    if(qt->request.group_by & RRDR_GROUP_BY_SELECTED)
        buffer_json_add_array_item_string(wb, "selected");

    else {
        if(qt->request.group_by & RRDR_GROUP_BY_DIMENSION)
            buffer_json_add_array_item_string(wb, "dimension");

        if(qt->request.group_by & RRDR_GROUP_BY_INSTANCE)
            buffer_json_add_array_item_string(wb, "instance");

        if(qt->request.group_by & RRDR_GROUP_BY_LABEL) {
            // one entry per group-by label key, formatted as "label:<key>"
            BUFFER *b = buffer_create(0, NULL);
            for (size_t l = 0; l < qt->group_by.used; l++) {
                buffer_flush(b);
                buffer_fast_strcat(b, "label:", 6);
                buffer_strcat(b, qt->group_by.label_keys[l]);
                buffer_json_add_array_item_string(wb, buffer_tostring(b));
            }
            buffer_free(b);
        }

        if(qt->request.group_by & RRDR_GROUP_BY_NODE)
            buffer_json_add_array_item_string(wb, "node");

        if(qt->request.group_by & RRDR_GROUP_BY_CONTEXT)
            buffer_json_add_array_item_string(wb, "context");

        if(qt->request.group_by & RRDR_GROUP_BY_UNITS)
            buffer_json_add_array_item_string(wb, "units");
    }

    buffer_json_array_close(wb); // the "key" array
}
static void rrdr_dimension_units_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
if(!r->du)
return;
bool percentage = query_target_has_percentage_units(r->internal.qt);
buffer_json_member_add_array(wb, "units");
buffer_json_member_add_array(wb, key);
for(size_t c = 0; c < r->d ; c++) {
if(!rrdr_dimension_should_be_exposed(r->od[c], options))
continue;
@ -877,11 +960,11 @@ static void rrdr_dimension_units_array_v2(BUFFER *wb, RRDR *r, RRDR_OPTIONS opti
buffer_json_array_close(wb);
}
static void rrdr_dimension_priority_array(BUFFER *wb, RRDR *r, RRDR_OPTIONS options) {
static void rrdr_dimension_priority_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
if(!r->dp)
return;
buffer_json_member_add_array(wb, "priorities");
buffer_json_member_add_array(wb, key);
for(size_t c = 0; c < r->d ; c++) {
if(!rrdr_dimension_should_be_exposed(r->od[c], options))
continue;
@ -891,11 +974,11 @@ static void rrdr_dimension_priority_array(BUFFER *wb, RRDR *r, RRDR_OPTIONS opti
buffer_json_array_close(wb);
}
static void rrdr_dimension_grouped_array(BUFFER *wb, RRDR *r, RRDR_OPTIONS options) {
static void rrdr_dimension_aggregated_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
if(!r->dgbc)
return;
buffer_json_member_add_array(wb, "grouped");
buffer_json_member_add_array(wb, key);
for(size_t c = 0; c < r->d ;c++) {
if(!rrdr_dimension_should_be_exposed(r->od[c], options))
continue;
@ -1089,12 +1172,9 @@ static void query_target_detailed_objects_tree(BUFFER *wb, RRDR *r, RRDR_OPTIONS
buffer_json_object_close(wb); // hosts
}
void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options,
RRDR_TIME_GROUPING group_method)
{
void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb) {
QUERY_TARGET *qt = r->internal.qt;
long rows = rrdr_rows(r);
RRDR_OPTIONS options = qt->request.options;
char kq[2] = "\"", // key quote
sq[2] = "\""; // string quote
@ -1181,7 +1261,6 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRD
buffer_json_member_add_uint64(wb, "contexts_soft_hash", qt->versions.contexts_soft_hash);
buffer_json_object_close(wb);
size_t contexts;
buffer_json_member_add_object(wb, "summary");
struct summary_total_counts
nodes_totals = { 0 },
@ -1192,7 +1271,7 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRD
label_key_value_totals = { 0 };
{
query_target_summary_nodes_v2(wb, qt, "nodes", &nodes_totals);
contexts = query_target_summary_contexts_v2(wb, qt, "contexts", &contexts_totals);
r->internal.contexts = query_target_summary_contexts_v2(wb, qt, "contexts", &contexts_totals);
query_target_summary_instances_v2(wb, qt, "instances", &instances_totals);
query_target_summary_dimensions_v12(wb, qt, "dimensions", true, &metrics_totals);
query_target_summary_labels_v12(wb, qt, "labels", true, &label_key_totals, &label_key_value_totals);
@ -1224,7 +1303,7 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRD
buffer_json_member_add_time_t(wb, "first_entry", qt->db.first_time_s);
buffer_json_member_add_time_t(wb, "last_entry", qt->db.last_time_s);
buffer_json_member_add_array(wb, "tiers");
buffer_json_member_add_array(wb, "per_tier");
for(size_t tier = 0; tier < storage_tiers ; tier++) {
buffer_json_add_array_item_object(wb);
buffer_json_member_add_uint64(wb, "tier", tier);
@ -1238,39 +1317,6 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRD
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
buffer_json_member_add_object(wb, "view");
{
query_target_title(wb, qt, contexts);
buffer_json_member_add_string(wb, "format", rrdr_format_to_string(format));
web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", r->view.options);
buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(group_method));
buffer_json_member_add_time_t(wb, "update_every", r->view.update_every);
buffer_json_member_add_time_t(wb, "after", r->view.after);
buffer_json_member_add_time_t(wb, "before", r->view.before);
buffer_json_member_add_object(wb, "partial_data_trimming");
buffer_json_member_add_time_t(wb, "max_update_every", r->partial_data_trimming.max_update_every);
buffer_json_member_add_time_t(wb, "expected_after", r->partial_data_trimming.expected_after);
buffer_json_member_add_time_t(wb, "trimmed_after", r->partial_data_trimming.trimmed_after);
buffer_json_object_close(wb);
buffer_json_member_add_uint64(wb, "points", rows);
query_target_combined_units_v2(wb, qt, contexts);
query_target_combined_chart_type(wb, qt, contexts);
buffer_json_member_add_object(wb, "dimensions");
{
rrdr_dimension_ids(wb, "ids", r, options);
rrdr_dimension_names(wb, "names", r, options);
rrdr_dimension_units_array_v2(wb, r, options);
rrdr_dimension_priority_array(wb, r, options);
rrdr_dimension_grouped_array(wb, r, options);
size_t dims = rrdr_latest_values(wb, "view_latest_values", r, options);
buffer_json_member_add_uint64(wb, "count", dims);
}
buffer_json_object_close(wb);
}
buffer_json_object_close(wb);
}
//static void annotations_range_for_value_flags(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format __maybe_unused, RRDR_OPTIONS options, RRDR_VALUE_FLAGS flags, const char *type) {
@ -1346,145 +1392,57 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRD
// buffer_json_array_close(wb); // annotations
//}
void rrdr2json_v2(RRDR *r __maybe_unused, BUFFER *wb, DATASOURCE_FORMAT format __maybe_unused, RRDR_OPTIONS options) {
bool expose_gbc = query_target_aggregatable(r->internal.qt);
buffer_json_member_add_object(wb, "result");
buffer_json_member_add_array(wb, "labels");
buffer_json_add_array_item_string(wb, "time");
long d, i;
const long used = (long)r->d;
for(d = 0, i = 0; d < used ; d++) {
if(!rrdr_dimension_should_be_exposed(r->od[d], options))
continue;
buffer_json_add_array_item_string(wb, string2str(r->dn[d]));
i++;
}
buffer_json_array_close(wb); // labels
buffer_json_member_add_object(wb, "point");
buffer_json_member_add_uint64(wb, "value", 0);
buffer_json_member_add_uint64(wb, "ar", 1);
buffer_json_member_add_uint64(wb, "pa", 2);
if(expose_gbc)
buffer_json_member_add_uint64(wb, "count", 3);
buffer_json_object_close(wb);
buffer_json_member_add_array(wb, "data");
if(i) {
long start = 0, end = rrdr_rows(r), step = 1;
if (!(options & RRDR_OPTION_REVERSED)) {
start = rrdr_rows(r) - 1;
end = -1;
step = -1;
}
// for each line in the array
for (i = start; i != end; i += step) {
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
uint32_t *gbc = &r->gbc [ i * r->d ];
time_t now = r->t[i];
buffer_json_add_array_item_array(wb); // row
if (options & RRDR_OPTION_MILLISECONDS)
buffer_json_add_array_item_time_ms(wb, now); // the time
else
buffer_json_add_array_item_time_t(wb, now); // the time
NETDATA_DOUBLE total = 1;
if(unlikely((options & RRDR_OPTION_PERCENTAGE) && !(options & (RRDR_OPTION_INTERNAL_GBC|RRDR_OPTION_INTERNAL_AR)))) {
total = 0;
for(d = 0; d < used ; d++) {
if(unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED))) continue;
NETDATA_DOUBLE n = cn[d];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
total += n;
}
// prevent a division by zero
if(total == 0) total = 1;
}
for (d = 0; d < used; d++) {
if (!rrdr_dimension_should_be_exposed(r->od[d], options))
continue;
RRDR_VALUE_FLAGS o = co[d];
buffer_json_add_array_item_array(wb); // point
// add the value
NETDATA_DOUBLE n = cn[d];
if(o & RRDR_VALUE_EMPTY) {
if (unlikely(options & RRDR_OPTION_NULL2ZERO))
buffer_json_add_array_item_double(wb, 0);
else
buffer_json_add_array_item_double(wb, NAN);
}
else {
if (unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
if (unlikely((options & RRDR_OPTION_PERCENTAGE))) {
n = n * 100 / total;
}
if(unlikely(i == start && d == 0)) {
r->view.min = r->view.max = n;
}
else {
if (n < r->view.min) r->view.min = n;
if (n > r->view.max) r->view.max = n;
}
buffer_json_add_array_item_double(wb, n);
}
// add the anomaly
buffer_json_add_array_item_double(wb, ar[d]);
// add the point annotations
buffer_json_add_array_item_uint64(wb, o);
// add the count
if(expose_gbc)
buffer_json_add_array_item_uint64(wb, gbc[d]);
buffer_json_array_close(wb); // point
}
buffer_json_array_close(wb); // row
}
}
buffer_json_array_close(wb); // data
buffer_json_object_close(wb); // annotations
}
void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format __maybe_unused, RRDR_OPTIONS options __maybe_unused) {
QUERY_TARGET *qt = r->internal.qt;
void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb) {
buffer_json_member_add_double(wb, "min", r->view.min);
buffer_json_member_add_double(wb, "max", r->view.max);
qt->timings.finished_ut = now_monotonic_usec();
buffer_json_member_add_object(wb, "timings");
buffer_json_member_add_double(wb, "prep_ms", (NETDATA_DOUBLE)(qt->timings.preprocessed_ut - qt->timings.received_ut) / USEC_PER_MS);
buffer_json_member_add_double(wb, "query_ms", (NETDATA_DOUBLE)(qt->timings.executed_ut - qt->timings.preprocessed_ut) / USEC_PER_MS);
buffer_json_member_add_double(wb, "group_by_ms", (NETDATA_DOUBLE)(qt->timings.group_by_ut - qt->timings.executed_ut) / USEC_PER_MS);
buffer_json_member_add_double(wb, "output_ms", (NETDATA_DOUBLE)(qt->timings.finished_ut - qt->timings.group_by_ut) / USEC_PER_MS);
buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(qt->timings.finished_ut - qt->timings.received_ut) / USEC_PER_MS);
buffer_json_object_close(wb);
rrdr_timings_v12(wb, "timings", r);
buffer_json_finalize(wb);
}
// Closes the /api/v2 json wrapper: emits the "view" object (title, options,
// grouping, per-dimension metadata, min/max), then the "timings" object,
// and finalizes the json buffer. Must be called after the data payload has
// been generated, because r->view.min/max are computed during data output.
void rrdr_json_wrapper_end2(RRDR *r, BUFFER *wb) {
    QUERY_TARGET *qt = r->internal.qt;
    DATASOURCE_FORMAT format = qt->request.format;
    RRDR_OPTIONS options = qt->request.options;

    buffer_json_member_add_object(wb, "view");
    {
        // r->internal.contexts is filled by rrdr_json_wrapper_begin2()
        query_target_title(wb, qt, r->internal.contexts);
        buffer_json_member_add_string(wb, "format", rrdr_format_to_string(format));
        web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", r->view.options);
        buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(qt->request.time_group_method));
        buffer_json_member_add_time_t(wb, "update_every", r->view.update_every);
        buffer_json_member_add_time_t(wb, "after", r->view.after);
        buffer_json_member_add_time_t(wb, "before", r->view.before);

        // how much of the most recent (possibly incomplete) data was trimmed
        buffer_json_member_add_object(wb, "partial_data_trimming");
        buffer_json_member_add_time_t(wb, "max_update_every", r->partial_data_trimming.max_update_every);
        buffer_json_member_add_time_t(wb, "expected_after", r->partial_data_trimming.expected_after);
        buffer_json_member_add_time_t(wb, "trimmed_after", r->partial_data_trimming.trimmed_after);
        buffer_json_object_close(wb);

        buffer_json_member_add_uint64(wb, "points", rrdr_rows(r));
        query_target_combined_units_v2(wb, qt, r->internal.contexts);
        query_target_combined_chart_type(wb, qt, r->internal.contexts);

        // metadata of the final (possibly grouped) dimensions
        buffer_json_member_add_object(wb, "dimensions");
        {
            rrdr_grouped_by_array_v2(wb, "grouped_by", r, options);
            rrdr_dimension_ids(wb, "ids", r, options);
            rrdr_dimension_names(wb, "names", r, options);
            rrdr_dimension_units_array_v2(wb, "units", r, options);
            rrdr_dimension_priority_array_v2(wb, "priorities", r, options);
            rrdr_dimension_aggregated_array_v2(wb, "aggregated", r, options);
            rrdr_dimension_average_values(wb, "view_average_values", r, options);
            size_t dims = rrdr_dimension_latest_values(wb, "view_latest_values", r, options);
            buffer_json_member_add_uint64(wb, "count", dims);

            rrdr_json_group_by_labels(wb, "labels", r, options);
        }
        buffer_json_object_close(wb); // dimensions

        // min/max of the values emitted in the data payload
        buffer_json_member_add_double(wb, "min", r->view.min);
        buffer_json_member_add_double(wb, "max", r->view.max);
    }
    buffer_json_object_close(wb); // view

    rrdr_timings_v12(wb, "timings", r);
    buffer_json_finalize(wb);
}

View file

@ -6,13 +6,13 @@
#include "rrd2json.h"
#include "web/api/queries/query.h"
typedef void (*wrapper_begin_t)(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options, RRDR_TIME_GROUPING group_method);
typedef void (*wrapper_end_t)(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options);
typedef void (*wrapper_begin_t)(RRDR *r, BUFFER *wb);
typedef void (*wrapper_end_t)(RRDR *r, BUFFER *wb);
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options, RRDR_TIME_GROUPING group_method);
void rrdr2json_v2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options);
void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options);
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb);
void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb);
void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format, RRDR_OPTIONS options, RRDR_TIME_GROUPING group_method);
void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb);
void rrdr_json_wrapper_end2(RRDR *r, BUFFER *wb);
#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H

View file

@ -148,448 +148,6 @@ cleanup:
return ret;
}
// Accumulator for one output (grouped) dimension, built while mapping the
// queried dimensions onto their group-by keys.
struct group_by_entry {
    size_t priority;            // ordering hint for the output dimension
    size_t count;               // presumably: number of source dimensions merged in - confirm against data_query_group_by()
    STRING *id;                 // generated id of the grouped dimension
    STRING *name;               // generated name of the grouped dimension
    STRING *units;              // units of the grouped dimension
    RRDR_DIMENSION_FLAGS od;    // accumulated RRDR_DIMENSION_* flags
};
// quoted_strings_splitter() separator callback:
// group-by label keys are delimited by commas or pipes.
static int group_by_label_is_space(char c) {
    return (c == ',' || c == '|') ? 1 : 0;
}
// Apply the requested group-by (dimension / instance / node / label keys /
// "selected") to a query result, merging the queried dimensions of `r`
// into a new RRDR with one output dimension per group.
// Returns `r` unchanged when no grouping is requested or there are no rows;
// returns NULL when the grouped RRDR cannot be allocated (the caller keeps `r`).
static RRDR *data_query_group_by(RRDR *r) {
QUERY_TARGET *qt = r->internal.qt;
RRDR_OPTIONS options = qt->request.options;
size_t rows = rrdr_rows(r);
if(qt->request.group_by == RRDR_GROUP_BY_NONE || !rows)
return r;
// over-allocate one entry per source dimension; only the first `added` get used
struct group_by_entry *entries = onewayalloc_callocz(r->internal.owa, qt->query.used, sizeof(struct group_by_entry));
// maps a generated group-by key string to its slot index in entries[]
DICTIONARY *groups = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
// split the requested label keys (',' or '|' separated) for label grouping
if(qt->request.group_by & RRDR_GROUP_BY_LABEL && qt->request.group_by_label && *qt->request.group_by_label)
qt->group_by.used = quoted_strings_splitter(qt->request.group_by_label, qt->group_by.label_keys, GROUP_BY_MAX_LABEL_KEYS, group_by_label_is_space);
if(!qt->group_by.used)
qt->request.group_by &= ~RRDR_GROUP_BY_LABEL;
// if nothing valid remains, fall back to grouping by dimension
if(!(qt->request.group_by & (RRDR_GROUP_BY_NODE | RRDR_GROUP_BY_INSTANCE | RRDR_GROUP_BY_DIMENSION | RRDR_GROUP_BY_LABEL | RRDR_GROUP_BY_SELECTED)))
qt->request.group_by = RRDR_GROUP_BY_DIMENSION;
int added = 0; // number of groups (output dimensions) created so far
BUFFER *key = buffer_create(0, NULL); // scratch buffer, reused for keys, ids and names
QUERY_INSTANCE *last_qi = NULL;
size_t priority = 0;
time_t update_every_max = 0; // max update_every across all instances - drives partial data trimming
for(size_t d = 0; d < qt->query.used ; d++) {
if(unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
continue;
QUERY_METRIC *qm = query_metric(qt, d);
QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
QUERY_NODE *qn = query_node(qt, qm->link.query_node_id);
// priority restarts at every instance and increments per dimension within it
if(qi != last_qi) {
priority = 0;
last_qi = qi;
time_t update_every = rrdinstance_acquired_update_every(qi->ria);
if(update_every > update_every_max)
update_every_max = update_every;
}
else
priority++;
// --------------------------------------------------------------------
// generate the group by key
buffer_flush(key);
if(unlikely(r->od[d] & RRDR_DIMENSION_HIDDEN)) {
// all hidden dimensions are collapsed into a single group
buffer_strcat(key, "__hidden_dimensions__");
}
else if(unlikely(qt->request.group_by & RRDR_GROUP_BY_SELECTED)) {
// 'selected' merges everything into one group, ignoring other groupings
buffer_strcat(key, "selected");
}
else {
if (qt->request.group_by & RRDR_GROUP_BY_DIMENSION) {
buffer_fast_strcat(key, "|", 1);
buffer_strcat(key, query_metric_id(qt, qm));
}
if (qt->request.group_by & RRDR_GROUP_BY_INSTANCE) {
buffer_fast_strcat(key, "|", 1);
buffer_strcat(key, string2str(query_instance_id_fqdn(qt, qi)));
}
if (qt->request.group_by & RRDR_GROUP_BY_LABEL) {
DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
for (size_t l = 0; l < qt->group_by.used; l++) {
buffer_fast_strcat(key, "|", 1);
rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by.label_keys[l], "[unset]");
}
}
if (qt->request.group_by & RRDR_GROUP_BY_NODE) {
buffer_fast_strcat(key, "|", 1);
buffer_strcat(key, qn->rrdhost->machine_guid);
}
// append the units
// units are part of the key, so dimensions with different units never merge
if (query_target_has_percentage_units(qt)) {
buffer_fast_strcat(key, "|%", 2);
} else {
buffer_fast_strcat(key, "|", 1);
buffer_strcat(key, rrdinstance_acquired_units(qi->ria));
}
}
// lookup the key in the dictionary
// a new entry keeps the -1 we pass in, signalling we must initialize it
int pos = -1;
int *set = dictionary_set(groups, buffer_tostring(key), &pos, sizeof(pos));
if(*set == -1) {
// the key just added to the dictionary
*set = pos = added++;
// ----------------------------------------------------------------
// generate the dimension id
buffer_flush(key);
if(unlikely(r->od[d] & RRDR_DIMENSION_HIDDEN)) {
buffer_strcat(key, "__hidden_dimensions__");
}
else if(unlikely(qt->request.group_by & RRDR_GROUP_BY_SELECTED)) {
buffer_strcat(key, "selected");
}
else {
if (qt->request.group_by & RRDR_GROUP_BY_DIMENSION) {
buffer_strcat(key, query_metric_id(qt, qm));
}
if (qt->request.group_by & RRDR_GROUP_BY_INSTANCE) {
if (buffer_strlen(key) != 0)
buffer_fast_strcat(key, ",", 1);
// when also grouping by node, the short instance id suffices;
// otherwise use the fully-qualified (node-prefixed) instance id
if (qt->request.group_by & RRDR_GROUP_BY_NODE)
buffer_strcat(key, rrdinstance_acquired_id(qi->ria));
else
buffer_strcat(key, string2str(query_instance_id_fqdn(qt, qi)));
}
if (qt->request.group_by & RRDR_GROUP_BY_LABEL) {
DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
for (size_t l = 0; l < qt->group_by.used; l++) {
if (buffer_strlen(key) != 0)
buffer_fast_strcat(key, ",", 1);
rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by.label_keys[l], "[unset]");
}
}
if (qt->request.group_by & RRDR_GROUP_BY_NODE) {
if (buffer_strlen(key) != 0)
buffer_fast_strcat(key, ",", 1);
buffer_strcat(key, qn->rrdhost->machine_guid);
}
}
entries[pos].id = string_strdupz(buffer_tostring(key));
// ----------------------------------------------------------------
// generate the dimension name
buffer_flush(key);
if(unlikely(r->od[d] & RRDR_DIMENSION_HIDDEN)) {
buffer_strcat(key, "__hidden_dimensions__");
}
else if(unlikely(qt->request.group_by & RRDR_GROUP_BY_SELECTED)) {
buffer_strcat(key, "selected");
}
else {
if (qt->request.group_by & RRDR_GROUP_BY_DIMENSION) {
buffer_strcat(key, query_metric_name(qt, qm));
}
if (qt->request.group_by & RRDR_GROUP_BY_INSTANCE) {
if (buffer_strlen(key) != 0)
buffer_fast_strcat(key, ",", 1);
if (qt->request.group_by & RRDR_GROUP_BY_NODE)
buffer_strcat(key, rrdinstance_acquired_name(qi->ria));
else
buffer_strcat(key, string2str(query_instance_name_fqdn(qt, qi)));
}
if (qt->request.group_by & RRDR_GROUP_BY_LABEL) {
DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
for (size_t l = 0; l < qt->group_by.used; l++) {
if (buffer_strlen(key) != 0)
buffer_fast_strcat(key, ",", 1);
rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by.label_keys[l], "[unset]");
}
}
if (qt->request.group_by & RRDR_GROUP_BY_NODE) {
if (buffer_strlen(key) != 0)
buffer_fast_strcat(key, ",", 1);
// names use the hostname, while ids use the machine guid
buffer_strcat(key, rrdhost_hostname(qn->rrdhost));
}
}
entries[pos].name = string_strdupz(buffer_tostring(key));
// add the rest of the info
entries[pos].units = rrdinstance_acquired_units_dup(qi->ria);
entries[pos].priority = priority;
}
else {
// the key found in the dictionary
pos = *set;
}
entries[pos].count++;
// keep the lowest (most important) priority of all grouped dimensions
if(unlikely(priority < entries[pos].priority))
entries[pos].priority = priority;
// remember in the source metric which output dimension it maps to
qm->grouped_as.slot = pos;
qm->grouped_as.id = entries[pos].id;
qm->grouped_as.name = entries[pos].name;
qm->grouped_as.units = entries[pos].units;
// copy the dimension flags decided by the query target
// we need this, because if a dimension is explicitly selected
// the query target adds to it the non-zero flag
qm->status |= RRDR_DIMENSION_GROUPED | r->od[d];
entries[pos].od |= RRDR_DIMENSION_GROUPED | r->od[d];
}
// check if we have multiple units
// STRINGs are interned, so pointer comparison detects different units
bool multiple_units = false;
for(int i = 1; i < added ; i++) {
if(entries[i].units != entries[0].units) {
multiple_units = true;
break;
}
}
if(multiple_units) {
// include the units into the id and name of the dimensions
for(int i = 0; i < added ; i++) {
buffer_flush(key);
buffer_strcat(key, string2str(entries[i].id));
buffer_fast_strcat(key, ",", 1);
buffer_strcat(key, string2str(entries[i].units));
STRING *u = string_strdupz(buffer_tostring(key));
string_freez(entries[i].id);
entries[i].id = u;
}
}
// allocate the grouped result: `added` dimensions x `rows` points
RRDR *r2 = rrdr_create(r->internal.owa, qt, added, rows);
if(!r2)
goto cleanup;
// per-dimension priorities, group-by counts and per-point group-by counters
r2->dp = onewayalloc_callocz(r2->internal.owa, r2->d, sizeof(*r2->dp));
r2->dgbc = onewayalloc_callocz(r2->internal.owa, r2->d, sizeof(*r2->dgbc));
r2->gbc = onewayalloc_callocz(r2->internal.owa, r2->n * r2->d, sizeof(*r2->gbc));
// copy from previous rrdr
r2->view = r->view;
r2->stats = r->stats;
r2->rows = rows;
r2->stats.result_points_generated = r2->d * r2->n;
// initialize r2 (dimension options, names, and ids)
// ownership of the entries[] STRINGs passes to r2 here
for(size_t d2 = 0; d2 < r2->d ; d2++) {
r2->od[d2] = entries[d2].od;
r2->di[d2] = entries[d2].id;
r2->dn[d2] = entries[d2].name;
r2->du[d2] = entries[d2].units;
r2->dp[d2] = entries[d2].priority;
r2->dgbc[d2] = entries[d2].count;
}
// trimming of (possibly incomplete) trailing points is enabled only for
// non-raw queries whose window reaches "now" - raw output keeps everything
r2->partial_data_trimming.max_update_every = update_every_max;
r2->partial_data_trimming.expected_after =
(!(qt->request.options & RRDR_OPTION_RETURN_RAW) && qt->window.before >= qt->window.now - update_every_max) ?
qt->window.before - update_every_max :
qt->window.before;
r2->partial_data_trimming.trimmed_after = qt->window.before;
// initialize r2 (timestamps and value flags)
for(size_t i = 0; i != rows ;i++) {
// copy the timestamp
r2->t[i] = r->t[i];
// make all values empty
NETDATA_DOUBLE *cn2 = &r2->v[ i * r2->d ];
RRDR_VALUE_FLAGS *co2 = &r2->o[ i * r2->d ];
NETDATA_DOUBLE *ar2 = &r2->ar[ i * r2->d ];
for (size_t d2 = 0; d2 < r2->d; d2++) {
cn2[d2] = 0.0;
ar2[d2] = 0.0;
co2[d2] = RRDR_VALUE_EMPTY;
}
}
// do the group_by
size_t last_row_gbc = 0;
for(size_t i = 0; i != rows ;i++) {
size_t idx = i * r->d;
NETDATA_DOUBLE *cn_base = &r->v[ idx ];
RRDR_VALUE_FLAGS *co_base = &r->o[ idx ];
NETDATA_DOUBLE *ar_base = &r->ar[ idx ];
size_t idx2 = i * r2->d;
NETDATA_DOUBLE *cn2_base = &r2->v[ idx2 ];
RRDR_VALUE_FLAGS *co2_base = &r2->o[ idx2 ];
NETDATA_DOUBLE *ar2_base = &r2->ar[ idx2 ];
uint32_t *gbc2_base = &r2->gbc[ idx2 ];
size_t row_gbc = 0; // metrics contributing to this row, across all groups
for(size_t d = 0; d < r->d ; d++) {
if(unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
continue;
NETDATA_DOUBLE n = cn_base[d];
RRDR_VALUE_FLAGS o = co_base[d];
NETDATA_DOUBLE ar = ar_base[d];
if(o & RRDR_VALUE_EMPTY) {
if(options & RRDR_OPTION_NULL2ZERO)
n = 0.0;
else
continue;
}
if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
QUERY_METRIC *qm = query_metric(qt, d);
size_t d2 = qm->grouped_as.slot;
NETDATA_DOUBLE *cn2 = &cn2_base[d2];
RRDR_VALUE_FLAGS *co2 = &co2_base[d2];
NETDATA_DOUBLE *ar2 = &ar2_base[d2];
uint32_t *gbc2 = &gbc2_base[d2];
// AVERAGE accumulates like SUM here; the division by the
// per-point counter (gbc) happens in the pass below
switch(qt->request.group_by_aggregate_function) {
default:
case RRDR_GROUP_BY_FUNCTION_AVERAGE:
case RRDR_GROUP_BY_FUNCTION_SUM:
*cn2 += n;
break;
case RRDR_GROUP_BY_FUNCTION_MIN:
if(n < *cn2)
*cn2 = n;
break;
case RRDR_GROUP_BY_FUNCTION_MAX:
if(n > *cn2)
*cn2 = n;
break;
}
*co2 |= (o & (RRDR_VALUE_RESET|RRDR_VALUE_PARTIAL));
*ar2 += ar;
(*gbc2)++;
row_gbc++;
}
// trim the tail when, within the trimming window, a row has fewer
// contributing metrics than the previous one (data still arriving)
if(unlikely(r->t[i] > r2->partial_data_trimming.expected_after && row_gbc < last_row_gbc)) {
// discard the rest of the points
r2->partial_data_trimming.trimmed_after = r->t[i];
r2->rows = i;
rows = i;
break;
}
else
last_row_gbc = row_gbc;
}
// apply averaging, remove RRDR_VALUE_EMPTY, find the non-zero dimensions, min and max
size_t min_max_values = 0;
NETDATA_DOUBLE min = NAN, max = NAN;
for (size_t d2 = 0; d2 < r2->d; d2++) {
size_t non_zero = 0;
for(size_t i = 0; i != rows ;i++) {
size_t idx2 = i * r2->d + d2;
NETDATA_DOUBLE *cn2 = &r2->v[ idx2 ];
RRDR_VALUE_FLAGS *co2 = &r2->o[ idx2 ];
NETDATA_DOUBLE *ar2 = &r2->ar[ idx2 ];
uint32_t gbc2 = r2->gbc[ idx2 ];
if(likely(gbc2)) {
// at least one metric contributed, so the point is not empty
*co2 &= ~RRDR_VALUE_EMPTY;
// fewer contributors than the group total marks the point partial
if(gbc2 != r2->dgbc[d2])
*co2 |= RRDR_VALUE_PARTIAL;
NETDATA_DOUBLE n;
if(qt->request.group_by_aggregate_function == RRDR_GROUP_BY_FUNCTION_AVERAGE)
n = (*cn2 /= gbc2);
else
n = *cn2;
// anomaly rates are averaged, unless raw aggregatable output is wanted
if(!query_target_aggregatable(qt))
*ar2 /= gbc2;
if(islessgreater(n, 0.0))
non_zero++;
if(unlikely(!min_max_values++)) {
min = n;
max = n;
}
else {
if(n < min)
min = n;
if(n > max)
max = n;
}
}
}
if(non_zero)
r2->od[d2] |= RRDR_DIMENSION_NONZERO;
}
r2->view.min = min;
r2->view.max = max;
cleanup:
buffer_free(key);
// on allocation failure the STRINGs were never handed over to r2 - free them
if(!r2 && entries && added) {
for(int d2 = 0; d2 < added ; d2++) {
string_freez(entries[d2].id);
string_freez(entries[d2].name);
}
}
onewayalloc_freez(r->internal.owa, entries);
dictionary_destroy(groups);
return r2;
}
static inline void buffer_json_member_add_key_only(BUFFER *wb, const char *key) {
buffer_print_json_comma_newline_spacing(wb);
buffer_print_json_key(wb, key);
@ -610,46 +168,46 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
wrapper_begin_t wrapper_begin = rrdr_json_wrapper_begin;
wrapper_end_t wrapper_end = rrdr_json_wrapper_end;
if(qt->request.version == 2)
if(qt->request.version == 2) {
wrapper_begin = rrdr_json_wrapper_begin2;
wrapper_end = rrdr_json_wrapper_end2;
}
RRDR *r1 = rrd2rrdr(owa, qt);
RRDR *r = rrd2rrdr(owa, qt);
qt->timings.executed_ut = now_monotonic_usec();
if(!r1) {
if(!r) {
buffer_strcat(wb, "Cannot generate output with these parameters on this chart.");
return HTTP_RESP_INTERNAL_SERVER_ERROR;
}
if (r1->view.flags & RRDR_RESULT_FLAG_CANCEL) {
rrdr_free(owa, r1);
if (r->view.flags & RRDR_RESULT_FLAG_CANCEL) {
rrdr_free(owa, r);
return HTTP_RESP_BACKEND_FETCH_FAILED;
}
if(r1->view.flags & RRDR_RESULT_FLAG_RELATIVE)
if(r->view.flags & RRDR_RESULT_FLAG_RELATIVE)
buffer_no_cacheable(wb);
else if(r1->view.flags & RRDR_RESULT_FLAG_ABSOLUTE)
else if(r->view.flags & RRDR_RESULT_FLAG_ABSOLUTE)
buffer_cacheable(wb);
if(latest_timestamp && rrdr_rows(r1) > 0)
*latest_timestamp = r1->view.before;
if(latest_timestamp && rrdr_rows(r) > 0)
*latest_timestamp = r->view.before;
DATASOURCE_FORMAT format = qt->request.format;
RRDR_OPTIONS options = qt->request.options;
RRDR_TIME_GROUPING group_method = qt->request.time_group_method;
RRDR *r = data_query_group_by(r1);
qt->timings.group_by_ut = now_monotonic_usec();
switch(format) {
case DATASOURCE_SSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_string_open(wb, "result");
rrdr2ssv(r, wb, options, "", " ", "");
buffer_json_member_add_string_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_TEXT_PLAIN;
@ -660,11 +218,11 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_SSV_COMMA:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_string_open(wb, "result");
rrdr2ssv(r, wb, options, "", ",", "");
buffer_json_member_add_string_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_TEXT_PLAIN;
@ -675,11 +233,11 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_JS_ARRAY:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_array(wb, "result");
rrdr2ssv(r, wb, options, "", ",", "");
buffer_json_array_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_APPLICATION_JSON;
@ -690,11 +248,11 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_CSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_string_open(wb, "result");
rrdr2csv(r, wb, format, options, "", ",", "\\n", "");
buffer_json_member_add_string_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_TEXT_PLAIN;
@ -705,11 +263,11 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_CSV_MARKDOWN:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_string_open(wb, "result");
rrdr2csv(r, wb, format, options, "", "|", "\\n", "");
buffer_json_member_add_string_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_TEXT_PLAIN;
@ -720,11 +278,11 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_CSV_JSON_ARRAY:
wb->content_type = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP) {
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_array(wb, "result");
rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n");
buffer_json_array_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_APPLICATION_JSON;
@ -737,11 +295,11 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_TSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_string_open(wb, "result");
rrdr2csv(r, wb, format, options, "", "\t", "\\n", "");
buffer_json_member_add_string_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_TEXT_PLAIN;
@ -752,13 +310,13 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_HTML:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_string_open(wb, "result");
buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n");
rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "");
buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n");
buffer_json_member_add_string_close(wb);
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
else {
wb->content_type = CT_TEXT_HTML;
@ -772,14 +330,14 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
wb->content_type = CT_APPLICATION_X_JAVASCRIPT;
if(options & RRDR_OPTION_JSON_WRAP) {
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_key_only(wb, "result");
}
rrdr2json(r, wb, options, 1);
if(options & RRDR_OPTION_JSON_WRAP)
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
break;
@ -787,28 +345,28 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
wb->content_type = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP) {
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_key_only(wb, "result");
}
rrdr2json(r, wb, options, 1);
if(options & RRDR_OPTION_JSON_WRAP)
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
break;
case DATASOURCE_JSONP:
wb->content_type = CT_APPLICATION_X_JAVASCRIPT;
if(options & RRDR_OPTION_JSON_WRAP) {
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_key_only(wb, "result");
}
rrdr2json(r, wb, options, 0);
if(options & RRDR_OPTION_JSON_WRAP)
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
break;
@ -817,36 +375,29 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
wb->content_type = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP) {
wrapper_begin(r, wb, format, options, group_method);
wrapper_begin(r, wb);
buffer_json_member_add_key_only(wb, "result");
}
rrdr2json(r, wb, options, 0);
if(options & RRDR_OPTION_JSON_WRAP) {
if (query_target_aggregatable(qt)) {
buffer_json_member_add_key_only(wb, "group_by_count");
rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_GBC, false);
}
if (options & RRDR_OPTION_RETURN_JWAR) {
buffer_json_member_add_key_only(wb, "anomaly_rates");
rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, false);
}
wrapper_end(r, wb, format, options);
wrapper_end(r, wb);
}
break;
case DATASOURCE_JSON2:
wb->content_type = CT_APPLICATION_JSON;
wrapper_begin(r, wb, format, options, group_method);
rrdr2json_v2(r, wb, format, options);
wrapper_end(r, wb, format, options);
wrapper_begin(r, wb);
rrdr2json_v2(r, wb);
wrapper_end(r, wb);
break;
}
if(r != r1)
rrdr_free(owa, r);
rrdr_free(owa, r1);
rrdr_free(owa, r);
return HTTP_RESP_OK;
}

View file

@ -59,6 +59,8 @@ const char *rrdr_format_to_string(DATASOURCE_FORMAT format);
int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp);
void rrdr_json_group_by_labels(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options);
struct query_target;
bool query_target_has_percentage_units(struct query_target *qt);
bool query_target_aggregatable(struct query_target *qt);

File diff suppressed because it is too large Load diff

View file

@ -357,7 +357,9 @@ paths:
- name: group_by
in: query
description: |
A comma separated list of `dimension`, `label`, `instance`, `node`, `selected`. All possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored. The order they are placed in the list is currently ignored; the result will always be in the order given here.
A comma separated list of the groupings required.
All possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored.
The order they are placed in the list is currently ignored.
required: false
schema:
type: array
@ -365,9 +367,11 @@ paths:
type: string
enum:
- dimension
- label
- instance
- label
- node
- context
- units
- selected
default:
- dimension
@ -491,11 +495,12 @@ paths:
- markdown
- array
- csvjsonarray
default: json
default: json2
- name: options
in: query
description: |
Options that affect data generation.
`raw` changes the output so that the values can be aggregated across multiple such queries.
required: false
allowEmptyValue: false
schema:
@ -503,24 +508,21 @@ paths:
items:
type: string
enum:
- raw
- nonzero
- flip
- jsonwrap
- min2max
- seconds
- milliseconds
- abs
- absolute
- absolute-sum
- null2zero
- objectrows
- google_json
- percentage
- unaligned
- match-ids
- match-names
- allow_past
- anomaly-bit
- group-by-labels
default:
- seconds
- jsonwrap
@ -2544,12 +2546,12 @@ components:
id:
description: The value string
type: string
ds:
$ref: "#/components/schemas/data_json2_items_count"
sts:
oneOf:
- $ref: "#/components/schemas/data_json2_sts"
- $ref: "#/components/schemas/data_json2_sts_raw"
ds:
$ref: "#/components/schemas/data_json2_items_count"
sts:
oneOf:
- $ref: "#/components/schemas/data_json2_sts"
- $ref: "#/components/schemas/data_json2_sts_raw"
alerts:
description: |
An array of all the unique alerts running, grouped by alert name (`nm` is available here)
@ -2577,14 +2579,257 @@ components:
type: string
db:
type: object
properties:
tiers:
description: |
The number of tiers this server is using.
type: integer
update_every:
description: |
The minimum update every, in seconds, for all tiers and all metrics aggregated into this query.
type: integer
first_entry:
description: |
The minimum unix epoch timestamp of the retention across all tiers for all metrics aggregated into this query.
type: integer
last_entry:
description: |
The maximum unix epoch timestamp of the retention across all tier for all metrics aggregated into this query.
type: integer
per_tier:
description: |
An array with information for each of the tiers available, related to this query.
type: array
items:
type: object
properties:
tier:
description: |
The tier number of this tier, starting at 0.
type: integer
queries:
description: |
The number of queries executed on this tier. Usually one query per metric is made, but a query may cross multiple tiers, in which case more than one query per metric is made.
type: integer
points:
description: |
The number of points read from this tier.
type: integer
update_every:
description: |
The minimum resolution of all metrics queried on this tier.
type: integer
first_entry:
description: |
The minimum unix epoch timestamp available across all metrics that used this tier. This reflects the oldest timestamp of the tier's retention.
type: integer
last_entry:
description: |
The maximum unix epoch timestamp available across all metrics that used this tier. This reflects the newest timestamp of the tier's retention.
view:
type: object
properties:
title:
description: |
The title the chart should have.
type: string
format:
description: |
The format the `result` top level member has.
type: string
options:
description: |
An array presenting all the options given to the query.
type: array
items:
type: string
time_group:
description: |
The same as the parameter `time_group`.
type: string
after:
description: |
The oldest unix epoch timestamp of the data returned in the `result`.
type: integer
before:
description: |
The newest unix epoch timestamp of the data returned in the `result`.
type: integer
partial_data_trimming:
description: |
Information related to trimming of the last few points of the `result`, that was required to remove (increasing) partial data.
Trimming is disabled when the `raw` option is given to the query.
type: object
properties:
max_update_every:
description: |
The maximum `update_every` for all metrics aggregated into the query.
Trimming is by default enabled at `view.before - max_update_every`, but only when `view.before >= now - max_update_every`.
type: integer
expected_after:
description: |
The timestamp at which trimming can be enabled.
If this timestamp is greater than or equal to `view.before`, there is no trimming.
type: integer
trimmed_after:
description: |
The timestamp at which trimming has been applied.
If this timestamp is greater than or equal to `view.before`, there is no trimming.
points:
description: |
The number of points in `result`.
type: integer
units:
description: |
The units of the query.
oneOf:
- type: string
- type: array
items:
type: string
chart_type:
description: |
The default chart type of the query.
type: string
enum:
- line
- area
- stacked
dimensions:
description: |
Detailed information about the chart dimensions included in the `result`.
type: object
properties:
grouped_by:
description: |
An array with the order of the groupings performed.
type: array
items:
type: string
enum:
- selected
- dimension
- instance
- node
- context
- units
- "label:key1"
- "label:key2"
- "label:keyN"
ids:
description: |
An array with the dimension ids that uniquely identify the dimensions for this query.
type: array
items:
type: string
names:
description: |
An array with the dimension names to be presented to users. Names may be overlapping, but IDs are not.
type: array
items:
type: string
units:
description: |
An array with the units each dimension has.
type: array
items:
type: string
priorities:
description: |
An array with the relative priorities of the dimensions.
Numbers may not be sequential or unique. The application is expected to order by this and then by name.
type: array
items:
type: integer
aggregated:
description: |
An array with the number of source metrics aggregated into each dimension.
type: array
items:
type: integer
view_average_values:
description: |
An array of the average value of each dimension across the entire query.
type: array
items:
type: number
view_latest_values:
description: |
An array of the latest value of each dimension, included in this query.
type: array
items:
type: number
count:
description: |
The number of dimensions in the `result`.
type: integer
labels:
description: |
The labels associated with each dimension in the query.
This object is only available when the `group-by-labels` option is given to the query.
type: object
properties:
label_key1:
description: |
An array having one entry for each of the dimensions of the query.
type: array
items:
description: |
An array having one entry for each of the values this label key has for the given dimension.
type: array
items:
type: string
min:
description: |
The minimum value of all points included in the `result`.
type: number
max:
description: |
The maximum value of all points included in the `result`.
type: number
result:
description: |
The result of the query.
The format explained here is `json2`.
type: object
min:
type: number
max:
type: number
properties:
labels:
description: |
The IDs of the dimensions returned. The first is always `time`.
type: array
items:
type: string
point:
description: |
The format of each point returned.
type: object
properties:
value:
description: |
The index of the value in each point.
type: integer
ar:
description: |
The index of the anomaly rate in each point.
type: integer
pa:
description: |
The index of the point annotations in each point.
This is a bitmap. `EMPTY = 1`, `RESET = 2`, `PARTIAL = 4`.
`EMPTY` means the point has no value.
`RESET` means that at least one metric aggregated experienced an overflow (a counter that wrapped).
`PARTIAL` means that this point should have more metrics aggregated into it, but not all metrics had data.
type: integer
count:
description: |
The number of metrics aggregated into this point. This exists only when the option `raw` is given to the query.
type: integer
data:
type: array
items:
allOf:
- type: integer
- type: array
timings:
type: object
data_json2_sts:

View file

@ -11,30 +11,30 @@ struct grouping_average {
};
void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_average(RRDR *r) {
struct grouping_average *g = (struct grouping_average *)r->grouping.data;
struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
g->sum = 0;
g->count = 0;
}
void grouping_free_average(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_average *g = (struct grouping_average *)r->grouping.data;
struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
g->sum += value;
g->count++;
}
NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_average *g = (struct grouping_average *)r->grouping.data;
struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
NETDATA_DOUBLE value;
@ -43,8 +43,8 @@ NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_opt
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
else {
if(unlikely(r->grouping.resampling_group != 1))
value = g->sum / r->grouping.resampling_divisor;
if(unlikely(r->time_grouping.resampling_group != 1))
value = g->sum / r->time_grouping.resampling_divisor;
else
value = g->sum / g->count;
}

View file

@ -38,7 +38,7 @@ static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
r->grouping.data = g;
r->time_grouping.data = g;
if(options && *options) {
// skip any leading spaces
@ -100,24 +100,24 @@ void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_countif(RRDR *r) {
struct grouping_countif *g = (struct grouping_countif *)r->grouping.data;
struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
g->matched = 0;
g->count = 0;
}
void grouping_free_countif(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_countif *g = (struct grouping_countif *)r->grouping.data;
struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
g->matched += g->comparison(value, g->target);
g->count++;
}
NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_countif *g = (struct grouping_countif *)r->grouping.data;
struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
NETDATA_DOUBLE value;

View file

@ -37,7 +37,7 @@ static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_des *g) {
NETDATA_DOUBLE points;
if(r->view.group == 1) {
// provide a running DES
points = (NETDATA_DOUBLE)r->grouping.points_wanted;
points = (NETDATA_DOUBLE)r->time_grouping.points_wanted;
}
else {
// provide a SES with flush points
@ -76,13 +76,13 @@ void grouping_create_des(RRDR *r, const char *options __maybe_unused) {
g->level = 0.0;
g->trend = 0.0;
g->count = 0;
r->grouping.data = g;
r->time_grouping.data = g;
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_des(RRDR *r) {
struct grouping_des *g = (struct grouping_des *)r->grouping.data;
struct grouping_des *g = (struct grouping_des *)r->time_grouping.data;
g->level = 0.0;
g->trend = 0.0;
g->count = 0;
@ -92,12 +92,12 @@ void grouping_reset_des(RRDR *r) {
}
void grouping_free_des(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_des *g = (struct grouping_des *)r->grouping.data;
struct grouping_des *g = (struct grouping_des *)r->time_grouping.data;
if(likely(g->count > 0)) {
// we have at least a number so far
@ -124,7 +124,7 @@ void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_des *g = (struct grouping_des *)r->grouping.data;
struct grouping_des *g = (struct grouping_des *)r->time_grouping.data;
if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;

View file

@ -12,25 +12,25 @@ struct grouping_incremental_sum {
};
void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused) {
r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_incremental_sum(RRDR *r) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping.data;
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->time_grouping.data;
g->first = 0;
g->last = 0;
g->count = 0;
}
void grouping_free_incremental_sum(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping.data;
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->time_grouping.data;
if(unlikely(!g->count)) {
g->first = value;
@ -43,7 +43,7 @@ void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping.data;
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->time_grouping.data;
NETDATA_DOUBLE value;

View file

@ -11,24 +11,24 @@ struct grouping_max {
};
void grouping_create_max(RRDR *r, const char *options __maybe_unused) {
r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_max(RRDR *r) {
struct grouping_max *g = (struct grouping_max *)r->grouping.data;
struct grouping_max *g = (struct grouping_max *)r->time_grouping.data;
g->max = 0;
g->count = 0;
}
void grouping_free_max(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_max *g = (struct grouping_max *)r->grouping.data;
struct grouping_max *g = (struct grouping_max *)r->time_grouping.data;
if(!g->count || fabsndd(value) > fabsndd(g->max)) {
g->max = value;
@ -37,7 +37,7 @@ void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_max *g = (struct grouping_max *)r->grouping.data;
struct grouping_max *g = (struct grouping_max *)r->time_grouping.data;
NETDATA_DOUBLE value;

View file

@ -30,7 +30,7 @@ void grouping_create_median_internal(RRDR *r, const char *options, NETDATA_DOUBL
}
g->percent = g->percent / 100.0;
r->grouping.data = g;
r->time_grouping.data = g;
}
void grouping_create_median(RRDR *r, const char *options) {
@ -64,20 +64,20 @@ void grouping_create_trimmed_median25(RRDR *r, const char *options) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_median(RRDR *r) {
struct grouping_median *g = (struct grouping_median *)r->grouping.data;
struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
g->next_pos = 0;
}
void grouping_free_median(RRDR *r) {
struct grouping_median *g = (struct grouping_median *)r->grouping.data;
struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
if(g) onewayalloc_freez(r->internal.owa, g->series);
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_median *g = (struct grouping_median *)r->grouping.data;
struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
if(unlikely(g->next_pos >= g->series_size)) {
g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
@ -88,7 +88,7 @@ void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_median *g = (struct grouping_median *)r->grouping.data;
struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
size_t available_slots = g->next_pos;
NETDATA_DOUBLE value;

View file

@ -11,24 +11,24 @@ struct grouping_min {
};
void grouping_create_min(RRDR *r, const char *options __maybe_unused) {
r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_min(RRDR *r) {
struct grouping_min *g = (struct grouping_min *)r->grouping.data;
struct grouping_min *g = (struct grouping_min *)r->time_grouping.data;
g->min = 0;
g->count = 0;
}
void grouping_free_min(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_min *g = (struct grouping_min *)r->grouping.data;
struct grouping_min *g = (struct grouping_min *)r->time_grouping.data;
if(!g->count || fabsndd(value) < fabsndd(g->min)) {
g->min = value;
@ -37,7 +37,7 @@ void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_min *g = (struct grouping_min *)r->grouping.data;
struct grouping_min *g = (struct grouping_min *)r->time_grouping.data;
NETDATA_DOUBLE value;

View file

@ -30,7 +30,7 @@ static void grouping_create_percentile_internal(RRDR *r, const char *options, NE
}
g->percent = g->percent / 100.0;
r->grouping.data = g;
r->time_grouping.data = g;
}
void grouping_create_percentile25(RRDR *r, const char *options) {
@ -64,20 +64,20 @@ void grouping_create_percentile99(RRDR *r, const char *options) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_percentile(RRDR *r) {
struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
g->next_pos = 0;
}
void grouping_free_percentile(RRDR *r) {
struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
if(g) onewayalloc_freez(r->internal.owa, g->series);
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
if(unlikely(g->next_pos >= g->series_size)) {
g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
@ -88,7 +88,7 @@ void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
NETDATA_DOUBLE value;
size_t available_slots = g->next_pos;

File diff suppressed because it is too large Load diff

View file

@ -54,11 +54,13 @@ const char *time_grouping_tostring(RRDR_TIME_GROUPING group);
typedef enum rrdr_group_by {
RRDR_GROUP_BY_NONE = 0,
RRDR_GROUP_BY_DIMENSION = (1 << 0),
RRDR_GROUP_BY_NODE = (1 << 1),
RRDR_GROUP_BY_INSTANCE = (1 << 2),
RRDR_GROUP_BY_LABEL = (1 << 3),
RRDR_GROUP_BY_SELECTED = (1 << 4),
RRDR_GROUP_BY_SELECTED = (1 << 0),
RRDR_GROUP_BY_DIMENSION = (1 << 1),
RRDR_GROUP_BY_NODE = (1 << 2),
RRDR_GROUP_BY_INSTANCE = (1 << 3),
RRDR_GROUP_BY_LABEL = (1 << 4),
RRDR_GROUP_BY_CONTEXT = (1 << 5),
RRDR_GROUP_BY_UNITS = (1 << 6),
} RRDR_GROUP_BY;
struct web_buffer;

View file

@ -77,9 +77,30 @@ inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
onewayalloc_freez(owa, r->dn);
onewayalloc_freez(owa, r->du);
onewayalloc_freez(owa, r->dp);
onewayalloc_freez(owa, r->dv);
onewayalloc_freez(owa, r->ar);
onewayalloc_freez(owa, r->gbc);
onewayalloc_freez(owa, r->dgbc);
if(r->dl) {
for(size_t d = 0; d < r->d ;d++)
dictionary_destroy(r->dl[d]);
onewayalloc_freez(owa, r->dl);
}
dictionary_destroy(r->label_keys);
if(r->group_by.r) {
// prevent accidental infinite recursion
r->group_by.r->group_by.r = NULL;
// do not release qt twice
r->group_by.r->internal.qt = NULL;
rrdr_free(owa, r->group_by.r);
}
onewayalloc_freez(owa, r);
}
@ -94,7 +115,7 @@ RRDR *rrdr_create(ONEWAYALLOC *owa, QUERY_TARGET *qt, size_t dimensions, size_t
r->view.before = qt->window.before;
r->view.after = qt->window.after;
r->grouping.points_wanted = points;
r->time_grouping.points_wanted = points;
r->d = (int)dimensions;
r->n = (int)points;

View file

@ -44,11 +44,11 @@ typedef enum rrdr_options {
RRDR_OPTION_SHOW_DETAILS = (1 << 23), // v2 returns detailed object tree
RRDR_OPTION_DEBUG = (1 << 24), // v2 returns request description
RRDR_OPTION_MINIFY = (1 << 25), // remove JSON spaces and newlines from JSON output
RRDR_OPTION_GROUP_BY_LABELS = (1 << 26), // v2 returns flattened labels per dimension of the chart
// internal ones - not to be exposed to the API
RRDR_OPTION_HEALTH_RSRVD1 = (1 << 29), // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
RRDR_OPTION_INTERNAL_AR = (1 << 30), // internal use only, to let the formatters know we want to render the anomaly rate
RRDR_OPTION_INTERNAL_GBC = (1 << 31), // internal use only, to let the formatters know we want to render the group by count
RRDR_OPTION_HEALTH_RSRVD1 = (1 << 30), // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate
} RRDR_OPTIONS;
typedef enum __attribute__ ((__packed__)) rrdr_value_flag {
@ -82,6 +82,16 @@ typedef enum __attribute__ ((__packed__)) rrdr_result_flags {
RRDR_RESULT_FLAG_CANCEL = (1 << 2), // the query needs to be cancelled
} RRDR_RESULT_FLAGS;
struct rrdr_group_by_entry {
size_t priority;
size_t count;
STRING *id;
STRING *name;
STRING *units;
RRDR_DIMENSION_FLAGS od;
DICTIONARY *dl;
};
typedef struct rrdresult {
size_t d; // the number of dimensions
size_t n; // the number of values in the arrays (number of points per dimension)
@ -94,6 +104,10 @@ typedef struct rrdresult {
STRING **du; // array of d dimension units
uint32_t *dgbc; // array of d dimension units - NOT ALLOCATED when RRDR is created
uint32_t *dp; // array of d dimension priority - NOT ALLOCATED when RRDR is created
NETDATA_DOUBLE *dv; // array of d dimension averages - NOT ALLOCATED when RRDR is created
DICTIONARY **dl; // array of d dimension labels - NOT ALLOCATED when RRDR is created
DICTIONARY *label_keys;
time_t *t; // array of n timestamps
NETDATA_DOUBLE *v; // array n x d values
@ -132,7 +146,11 @@ typedef struct rrdresult {
size_t points_wanted; // used by SES and DES
size_t resampling_group; // used by AVERAGE
NETDATA_DOUBLE resampling_divisor; // used by AVERAGE
} grouping;
} time_grouping;
struct {
struct rrdresult *r;
} group_by;
struct {
time_t max_update_every;
@ -143,6 +161,8 @@ typedef struct rrdresult {
struct {
ONEWAYALLOC *owa; // the allocator used
struct query_target *qt; // the QUERY_TARGET
size_t contexts; // temp needed between json_wrapper_begin2() and json_wrapper_end2()
size_t queries_count; // temp needed to know if a query is the first executed
#ifdef NETDATA_INTERNAL_CHECKS
const char *log;

View file

@ -31,7 +31,7 @@ static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_ses *g) {
NETDATA_DOUBLE points;
if(r->view.group == 1) {
// provide a running DES
points = (NETDATA_DOUBLE)r->grouping.points_wanted;
points = (NETDATA_DOUBLE)r->time_grouping.points_wanted;
}
else {
// provide a SES with flush points
@ -52,24 +52,24 @@ void grouping_create_ses(RRDR *r, const char *options __maybe_unused) {
struct grouping_ses *g = (struct grouping_ses *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_ses));
set_alpha(r, g);
g->level = 0.0;
r->grouping.data = g;
r->time_grouping.data = g;
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_ses(RRDR *r) {
struct grouping_ses *g = (struct grouping_ses *)r->grouping.data;
struct grouping_ses *g = (struct grouping_ses *)r->time_grouping.data;
g->level = 0.0;
g->count = 0;
}
void grouping_free_ses(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_ses *g = (struct grouping_ses *)r->grouping.data;
struct grouping_ses *g = (struct grouping_ses *)r->time_grouping.data;
if(unlikely(!g->count))
g->level = value;
@ -79,7 +79,7 @@ void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_ses *g = (struct grouping_ses *)r->grouping.data;
struct grouping_ses *g = (struct grouping_ses *)r->time_grouping.data;
if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;

View file

@ -15,23 +15,23 @@ struct grouping_stddev {
};
void grouping_create_stddev(RRDR *r, const char *options __maybe_unused) {
r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_stddev));
r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_stddev));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_stddev(RRDR *r) {
struct grouping_stddev *g = (struct grouping_stddev *)r->grouping.data;
struct grouping_stddev *g = (struct grouping_stddev *)r->time_grouping.data;
g->count = 0;
}
void grouping_free_stddev(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_stddev *g = (struct grouping_stddev *)r->grouping.data;
struct grouping_stddev *g = (struct grouping_stddev *)r->time_grouping.data;
g->count++;
@ -62,7 +62,7 @@ static inline NETDATA_DOUBLE stddev(struct grouping_stddev *g) {
}
NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->grouping.data;
struct grouping_stddev *g = (struct grouping_stddev *)r->time_grouping.data;
NETDATA_DOUBLE value;
@ -89,7 +89,7 @@ NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_optio
// https://en.wikipedia.org/wiki/Coefficient_of_variation
NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->grouping.data;
struct grouping_stddev *g = (struct grouping_stddev *)r->time_grouping.data;
NETDATA_DOUBLE value;

View file

@ -11,30 +11,30 @@ struct grouping_sum {
};
void grouping_create_sum(RRDR *r, const char *options __maybe_unused) {
r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_sum));
r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_sum));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_sum(RRDR *r) {
struct grouping_sum *g = (struct grouping_sum *)r->grouping.data;
struct grouping_sum *g = (struct grouping_sum *)r->time_grouping.data;
g->sum = 0;
g->count = 0;
}
void grouping_free_sum(RRDR *r) {
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_sum *g = (struct grouping_sum *)r->grouping.data;
struct grouping_sum *g = (struct grouping_sum *)r->time_grouping.data;
g->sum += value;
g->count++;
}
NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_sum *g = (struct grouping_sum *)r->grouping.data;
struct grouping_sum *g = (struct grouping_sum *)r->time_grouping.data;
NETDATA_DOUBLE value;

View file

@ -30,7 +30,7 @@ static void grouping_create_trimmed_mean_internal(RRDR *r, const char *options,
}
g->percent = 1.0 - ((g->percent / 100.0) * 2.0);
r->grouping.data = g;
r->time_grouping.data = g;
}
void grouping_create_trimmed_mean1(RRDR *r, const char *options) {
@ -61,20 +61,20 @@ void grouping_create_trimmed_mean25(RRDR *r, const char *options) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_trimmed_mean(RRDR *r) {
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->grouping.data;
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->time_grouping.data;
g->next_pos = 0;
}
void grouping_free_trimmed_mean(RRDR *r) {
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->grouping.data;
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->time_grouping.data;
if(g) onewayalloc_freez(r->internal.owa, g->series);
onewayalloc_freez(r->internal.owa, r->grouping.data);
r->grouping.data = NULL;
onewayalloc_freez(r->internal.owa, r->time_grouping.data);
r->time_grouping.data = NULL;
}
void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->grouping.data;
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->time_grouping.data;
if(unlikely(g->next_pos >= g->series_size)) {
g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
@ -85,7 +85,7 @@ void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->grouping.data;
struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->time_grouping.data;
NETDATA_DOUBLE value;
size_t available_slots = g->next_pos;

View file

@ -57,3 +57,8 @@ RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) {
return options;
}
bool web_client_interrupt_callback(void *data) {
struct web_client *w = data;
return sock_has_output_error(w->ofd);
}

View file

@ -29,6 +29,8 @@ static inline void fix_google_param(char *s) {
}
}
bool web_client_interrupt_callback(void *data);
#include "web_api_v1.h"
#include "web_api_v2.h"

View file

@ -43,7 +43,9 @@ static struct {
, {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS}
, {"details" , 0 , RRDR_OPTION_SHOW_DETAILS}
, {"debug" , 0 , RRDR_OPTION_DEBUG}
, {"plan" , 0 , RRDR_OPTION_DEBUG}
, {"minify" , 0 , RRDR_OPTION_MINIFY}
, {"group-by-labels" , 0 , RRDR_OPTION_GROUP_BY_LABELS}
, {NULL , 0 , 0}
};

View file

@ -79,7 +79,7 @@ static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct w
RRDR_TIME_GROUPING time_group = RRDR_GROUPING_AVERAGE;
RRDR_GROUP_BY group_by = RRDR_GROUP_BY_DIMENSION;
RRDR_GROUP_BY_FUNCTION group_by_aggregate = RRDR_GROUP_BY_FUNCTION_AVERAGE;
DATASOURCE_FORMAT format = DATASOURCE_JSON;
DATASOURCE_FORMAT format = DATASOURCE_JSON2;
RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR;
while(url) {
@ -166,7 +166,7 @@ static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct w
if(group_by & RRDR_GROUP_BY_SELECTED)
group_by = RRDR_GROUP_BY_SELECTED; // remove all other groupings
if(group_by & ~(RRDR_GROUP_BY_DIMENSION))
if((group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE))
options |= RRDR_OPTION_ABSOLUTE;
if(options & RRDR_OPTION_DEBUG)
@ -215,6 +215,9 @@ static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct w
.query_source = QUERY_SOURCE_API_DATA,
.priority = STORAGE_PRIORITY_NORMAL,
.received_ut = received_ut,
.interrupt_callback = web_client_interrupt_callback,
.interrupt_callback_data = w,
};
QUERY_TARGET *qt = query_target_create(&qtr);
ONEWAYALLOC *owa = NULL;