mirror of
https://github.com/netdata/netdata.git
synced 2025-05-02 16:30:12 +00:00

* rrdfamily * rrddim * rrdset plugin and module names * rrdset units * rrdset type * rrdset family * rrdset title * rrdset title more * rrdset context * rrdcalctemplate context and removal of context hash from rrdset * strings statistics * rrdset name * rearranged members of rrdset * eliminate rrdset name hash; rrdcalc chart converted to STRING * rrdset id, eliminated rrdset hash * rrdcalc, alarm_entry, alert_config and some of rrdcalctemplate * rrdcalctemplate * rrdvar * eval_variable * rrddimvar and rrdsetvar * rrdhost hostname, os and tags * fix master commits * added thread cache; implemented string_dup without locks * faster thread cache * rrdset and rrddim now use dictionaries for indexing * rrdhost now uses dictionary * rrdfamily now uses DICTIONARY * rrdvar using dictionary instead of AVL * allocate the right size to rrdvar flag members * rrdhost remaining char * members to STRING * * better error handling on indexing * strings now use a read/write lock to allow parallel searches to the index * removed AVL support from dictionaries; implemented STRING with native Judy calls * string releases should be negative * only 31 bits are allowed for enum flags * proper locking on strings * string threading unittest and fixes * fix lgtm finding * fixed naming * stream chart/dimension definitions at the beginning of a streaming session * thread stack variable is undefined on thread cancel * rrdcontext garbage collect per host on startup * worker control in garbage collection * relaxed deletion of rrdmetrics * type checking on dictfe * netdata chart to monitor rrdcontext triggers * Group chart label updates * rrdcontext better handling of collected rrdsets * rrdpush incremental transmition of definitions should use as much buffer as possible * require 1MB per chart * empty the sender buffer before enabling metrics streaming * fill up to 50% of buffer * reset signaling metrics sending * use the shared variable for status * use separate host flag for enabling streaming 
of metrics * make sure the flag is clear * add logging for streaming * add logging for streaming on buffer overflow * circular_buffer proper sizing * removed obsolete logs * do not execute worker jobs if not necessary * better messages about compression disabling * proper use of flags and updating rrdset last access time every time the obsoletion flag is flipped * monitor stream sender used buffer ratio * Update exporting unit tests * no need to compare label value with strcmp * streaming send workers now monitor bandwidth * workers now use strings * streaming receiver monitors incoming bandwidth * parser shift of worker ids * minor fixes * Group chart label updates * Populate context with dimensions that have data * Fix chart id * better shift of parser worker ids * fix for streaming compression * properly count received bytes * ensure LZ4 compression ring buffer does not wrap prematurely * do not stream empty charts; do not process empty instances in rrdcontext * need_to_send_chart_definition() does not need an rrdset lock any more * rrdcontext objects are collected, after data have been written to the db * better logging of RRDCONTEXT transitions * always set all variables needed by the worker utilization charts * implemented double linked list for most objects; eliminated alarm indexes from rrdhost; and many more fixes * lockless strings design - string_dup() and string_freez() are totally lockless when they dont need to touch Judy - only Judy is protected with a read/write lock * STRING code re-organization for clarity * thread_cache improvements; double numbers precision on worker threads * STRING_ENTRY now shadown STRING, so no duplicate definition is required; string_length() renamed to string_strlen() to follow the paradigm of all other functions, STRING internal statistics are now only compiled with NETDATA_INTERNAL_CHECKS * rrdhost index by hostname now cleans up; aclk queries of archieved hosts do not index hosts * Add index to speed up database context 
searches * Removed last_updated optimization (was also buggy after latest merge with master) Co-authored-by: Stelios Fragkakis <52996999+stelfrag@users.noreply.github.com> Co-authored-by: Vladimir Kobal <vlad@prokk.net>
291 lines
11 KiB
C
// SPDX-License-Identifier: GPL-3.0-or-later

#include "json.h"

// date formats for the row timestamps of the output
#define JSON_DATES_JS 1         // javascript Date(y,m,d,H,M,S) values
#define JSON_DATES_TIMESTAMP 2  // unix epoch timestamps

// rrdr2json() - render the query result set 'r' as JSON, appending to 'wb'.
//
// Parameters:
//   r                  - query result: r->d dimensions by rrdr_rows(r) rows
//   wb                 - the output buffer the JSON text is appended to
//   options            - RRDR_OPTION_* flags controlling formatting
//   datatable          - non-zero: emit Google Visualization DataTable JSON;
//                        zero: emit the plain {labels:[...],data:[...]} JSON
//   context_param_list - optional; when given, supplies the dimension list
//                        (rd) and flags (e.g. CONTEXT_FLAGS_ARCHIVE)
//
// The function first emits a header (the dimension labels), then one JSON
// row per RRDR line, honoring OBJECTSROWS, GOOGLE_JSON, PERCENTAGE,
// MILLISECONDS, NULL2ZERO, ABSOLUTE and the other RRDR options.
void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct context_param *context_param_list)
{
    RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;

    // archived contexts are not backed by a live rrdset, so the rrdset
    // read lock is neither required nor available for them
    int should_lock = (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE));

    if (should_lock)
        rrdset_check_rdlock(r->st);

    //info("RRD2JSON(): %s: BEGIN", r->st->id);
    int row_annotations = 0, dates, dates_with_new = 0;

    // the JSON punctuation snippets are pre-rendered once here, so the
    // hot row loop below can emit them with cheap buffer_fast_strcat()
    char kq[2] = "",                    // key quote
        sq[2] = "",                     // string quote
        pre_label[101] = "",            // before each label
        post_label[101] = "",           // after each label
        pre_date[101] = "",             // the beginning of line, to the date
        post_date[101] = "",            // closing the date
        pre_value[101] = "",            // before each value
        post_value[101] = "",           // after each value
        post_line[101] = "",            // at the end of each row
        normal_annotation[201] = "",    // default row annotation
        overflow_annotation[201] = "",  // overflow row annotation
        data_begin[101] = "",           // between labels and values
        finish[101] = "",               // at the end of everything
        object_rows_time[101] = "";

    if(datatable) {
        // Google Visualization DataTable format
        dates = JSON_DATES_JS;
        if( options & RRDR_OPTION_GOOGLE_JSON ) {
            // google's own parser wants bare keys and single quotes
            kq[0] = '\0';
            sq[0] = '\'';
        }
        else {
            kq[0] = '"';
            sq[0] = '"';
        }
        row_annotations = 1;
        snprintfz(pre_date, 100, " {%sc%s:[{%sv%s:%s", kq, kq, kq, kq, sq);
        snprintfz(post_date, 100, "%s}", sq);
        snprintfz(pre_label, 100, ",\n {%sid%s:%s%s,%slabel%s:%s", kq, kq, sq, sq, kq, kq, sq);
        snprintfz(post_label, 100, "%s,%spattern%s:%s%s,%stype%s:%snumber%s}", sq, kq, kq, sq, sq, kq, kq, sq, sq);
        snprintfz(pre_value, 100, ",{%sv%s:", kq, kq);
        strcpy(post_value, "}");
        strcpy(post_line, "]}");
        snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq);
        strcpy(finish, "\n]\n}");

        snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq);
        snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq);

        // the DataTable header: time column + the two annotation columns
        buffer_sprintf(wb, "{\n %scols%s:\n [\n", kq, kq);
        buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%stime%s,%spattern%s:%s%s,%stype%s:%sdatetime%s},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq);
        buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotation%s}},\n", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);
        buffer_sprintf(wb, " {%sid%s:%s%s,%slabel%s:%s%s,%spattern%s:%s%s,%stype%s:%sstring%s,%sp%s:{%srole%s:%sannotationText%s}}", kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, sq, sq, kq, kq, kq, kq, sq, sq);

        // remove the valueobjects flag
        // google wants its own keys
        // (unconditional clear - equivalent to the previous
        // 'if set, clear it' form, since clearing an unset bit is a no-op)
        options &= ~RRDR_OPTION_OBJECTSROWS;
    }
    else {
        // plain netdata JSON format
        kq[0] = '"';
        sq[0] = '"';
        if(options & RRDR_OPTION_GOOGLE_JSON) {
            dates = JSON_DATES_JS;
            dates_with_new = 1;   // prefix dates with "new " (javascript Date objects)
        }
        else {
            dates = JSON_DATES_TIMESTAMP;
            dates_with_new = 0;
        }
        if( options & RRDR_OPTION_OBJECTSROWS )
            strcpy(pre_date, " { ");
        else
            strcpy(pre_date, " [ ");
        strcpy(pre_label, ",\"");
        strcpy(post_label, "\"");
        strcpy(pre_value, ",");
        if( options & RRDR_OPTION_OBJECTSROWS )
            strcpy(post_line, "}");
        else
            strcpy(post_line, "]");
        snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq);
        strcpy(finish, "\n]\n}");

        buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq);
        buffer_sprintf(wb, "%stime%s", sq, sq);

        if( options & RRDR_OPTION_OBJECTSROWS )
            snprintfz(object_rows_time, 100, "%stime%s: ", kq, kq);

    }

    // cache the lengths of the pre-rendered snippets once, so the hot
    // loop can use buffer_fast_strcat() without recomputing strlen()
    size_t pre_value_len = strlen(pre_value);
    size_t post_value_len = strlen(post_value);
    size_t pre_label_len = strlen(pre_label);
    size_t post_label_len = strlen(post_label);
    size_t pre_date_len = strlen(pre_date);
    size_t post_date_len = strlen(post_date);
    size_t post_line_len = strlen(post_line);
    size_t normal_annotation_len = strlen(normal_annotation);
    size_t overflow_annotation_len = strlen(overflow_annotation);
    size_t object_rows_time_len = strlen(object_rows_time);

    // -------------------------------------------------------------------------
    // print the JSON header

    long c, i;
    RRDDIM *rd;

    // print the header lines - one label per visible dimension;
    // 'c' indexes the r->od[] flags array (r->d entries), 'rd' walks
    // the dimension list, 'i' counts the labels actually printed
    for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
        if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
        if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;

        buffer_fast_strcat(wb, pre_label, pre_label_len);
        buffer_strcat(wb, rrddim_name(rd));
//        buffer_strcat(wb, ".");
//        buffer_strcat(wb, rd->rrdset->name);
        buffer_fast_strcat(wb, post_label, post_label_len);
        i++;
    }
    if(!i) {
        // no printable dimension - emit a placeholder label
        buffer_fast_strcat(wb, pre_label, pre_label_len);
        buffer_fast_strcat(wb, "no data", 7);
        buffer_fast_strcat(wb, post_label, post_label_len);
    }
    size_t total_number_of_dimensions = i;

    // print the begin of row data
    buffer_strcat(wb, data_begin);

    // if all dimensions are hidden, print a null
    if(!i) {
        buffer_strcat(wb, finish);
        return;
    }

    // iterate the rows newest-to-oldest by default; REVERSED walks forward
    long start = 0, end = rrdr_rows(r), step = 1;
    if(!(options & RRDR_OPTION_REVERSED)) {
        start = rrdr_rows(r) - 1;
        end = -1;
        step = -1;
    }

    // pre-allocate a large enough buffer for us
    // this does not need to be accurate - it is just a hint to avoid multiple realloc().
    buffer_need_bytes(wb,
                      ( 20 * rrdr_rows(r)) // timestamp + json overhead
                      + ( (pre_value_len + post_value_len + 4) * total_number_of_dimensions * rrdr_rows(r) ) // number
    );

    // for each line in the array
    NETDATA_DOUBLE total = 1;
    for(i = start; i != end ;i += step) {
        // row 'i' spans r->d consecutive entries in the value,
        // options and anomaly-rate arrays
        NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
        RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
        NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];

        time_t now = r->t[i];

        if(dates == JSON_DATES_JS) {
            // generate the local date time
            struct tm tmbuf, *tm = localtime_r(&now, &tmbuf);
            if(!tm) { error("localtime_r() failed."); continue; }

            if(likely(i != start)) buffer_fast_strcat(wb, ",\n", 2);
            buffer_fast_strcat(wb, pre_date, pre_date_len);

            if( options & RRDR_OPTION_OBJECTSROWS )
                buffer_fast_strcat(wb, object_rows_time, object_rows_time_len);

            if(unlikely(dates_with_new))
                buffer_fast_strcat(wb, "new ", 4);

            buffer_jsdate(wb, tm->tm_year + 1900, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);

            buffer_fast_strcat(wb, post_date, post_date_len);

            if(unlikely(row_annotations)) {
                // google supports one annotation per row
                int annotation_found = 0;

                // FIX: bound the scan with 'c < r->d' like every other
                // loop here - co[] and r->od[] have exactly r->d entries,
                // while the dimension list may be longer, so the unbounded
                // walk could read past the end of those arrays
                for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
                    if(unlikely(!(r->od[c] & RRDR_DIMENSION_SELECTED))) continue;

                    if(unlikely(co[c] & RRDR_VALUE_RESET)) {
                        buffer_fast_strcat(wb, overflow_annotation, overflow_annotation_len);
                        annotation_found = 1;
                        break;
                    }
                }
                if(likely(!annotation_found))
                    buffer_fast_strcat(wb, normal_annotation, normal_annotation_len);
            }
        }
        else {
            // print the timestamp of the line
            if(likely(i != start))
                buffer_fast_strcat(wb, ",\n", 2);

            buffer_fast_strcat(wb, pre_date, pre_date_len);

            if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
                buffer_fast_strcat(wb, object_rows_time, object_rows_time_len);

            buffer_rrd_value(wb, (NETDATA_DOUBLE)r->t[i]);

            // in ms
            if(unlikely(options & RRDR_OPTION_MILLISECONDS))
                buffer_fast_strcat(wb, "000", 3);

            buffer_fast_strcat(wb, post_date, post_date_len);
        }

        int set_min_max = 0;
        if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
            // percentage mode: compute the row total first, so each
            // value can later be rendered as its share of the row
            total = 0;
            for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
                NETDATA_DOUBLE n;
                if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
                    n = ar[c];
                else
                    n = cn[c];

                if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
                    n = -n;

                total += n;
            }
            // prevent a division by zero
            if(total == 0) total = 1;
            set_min_max = 1;
        }

        // for each dimension
        for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
            if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
            if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;

            NETDATA_DOUBLE n;
            if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
                n = ar[c];   // anomaly rate instead of the collected value
            else
                n = cn[c];

            buffer_fast_strcat(wb, pre_value, pre_value_len);

            if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
                buffer_sprintf(wb, "%s%s%s: ", kq, rrddim_name(rd), kq);

            if(co[c] & RRDR_VALUE_EMPTY && !(options & RRDR_OPTION_INTERNAL_AR)) {
                // gaps in the data: null by default, 0 with NULL2ZERO
                if(unlikely(options & RRDR_OPTION_NULL2ZERO))
                    buffer_fast_strcat(wb, "0", 1);
                else
                    buffer_fast_strcat(wb, "null", 4);
            }
            else {
                if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
                    n = -n;

                if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
                    n = n * 100 / total;

                    // the first value rendered in percentage mode
                    // (re)initializes the result's min/max
                    if(unlikely(set_min_max)) {
                        r->min = r->max = n;
                        set_min_max = 0;
                    }

                    if(n < r->min) r->min = n;
                    if(n > r->max) r->max = n;
                }

                buffer_rrd_value(wb, n);
            }

            buffer_fast_strcat(wb, post_value, post_value_len);
        }

        buffer_fast_strcat(wb, post_line, post_line_len);
    }

    buffer_strcat(wb, finish);
    //info("RRD2JSON(): %s: END", r->st->id);
}
|