
Prometheus remote write backend ()

* Add Prometheus remote write backend prototype

* Fix autotools issues

* Send HTTP POST request

* Add parameters to HTTP header

* Discard HTTP response 200

* Update CMake build configuration

* Fix Codacy issue

* Check for C++ binary

* Fix compilation without remote write backend

* Add options to the installer script

* Fix configure script warning

* Fix make dist

* Downgrade to ByteSize for better compatibility

* Integrate remote write more tightly into the existing backends code

* Cleanup

* Fix build error

* Parse host tags

* Fix Codacy issue

* Fix counters for buffered data

* Rename preprocessor symbol

* Better error handling

* Cleanup

* Update the documentation
Vladimir Kobal 2019-06-07 11:48:32 +03:00 committed by Chris Akritidis
parent 56c502be51
commit 77781d033d
20 changed files with 731 additions and 33 deletions

.gitignore

@@ -59,6 +59,10 @@ xenstat.plugin
cgroup-network
!cgroup-network/
# protoc generated files
*.pb.cc
*.pb.h
# installation artifacts
packaging/installer/.environment.sh
*.tar.*

CMakeLists.txt

@@ -241,6 +241,27 @@ find_library(HAVE_KINESIS aws-cpp-sdk-kinesis)
# later we use:
# ${HAVE_KINESIS}
# -----------------------------------------------------------------------------
# Detect libprotobuf
pkg_check_modules(PROTOBUF protobuf)
# later we use:
# ${PROTOBUF_LIBRARIES}
# ${PROTOBUF_CFLAGS_OTHER}
# ${PROTOBUF_INCLUDE_DIRS}
# -----------------------------------------------------------------------------
# Detect libsnappy
pkg_check_modules(SNAPPY snappy)
# later we use:
# ${SNAPPY_LIBRARIES}
# ${SNAPPY_CFLAGS_OTHER}
# ${SNAPPY_INCLUDE_DIRS}
# -----------------------------------------------------------------------------
# netdata files
@@ -547,6 +568,11 @@ set(KINESIS_BACKEND_FILES
backends/aws_kinesis/aws_kinesis_put_record.h
)
set(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES
backends/prometheus/remote_write/remote_write.cc
backends/prometheus/remote_write/remote_write.h
)
set(DAEMON_FILES
daemon/common.c
daemon/common.h
@@ -611,6 +637,29 @@ ELSE()
message(STATUS "kinesis backend: disabled (requires AWS SDK for C++)")
ENDIF()
# -----------------------------------------------------------------------------
# prometheus remote write backend
IF(PROTOBUF_LIBRARIES AND SNAPPY_LIBRARIES)
SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE True)
ELSE()
SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE False)
ENDIF()
IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
message(STATUS "prometheus remote write backend: enabled")
find_package(Protobuf REQUIRED)
protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS backends/prometheus/remote_write/remote_write.proto)
list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_BACKEND_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
list(APPEND NETDATA_COMMON_LIBRARIES ${PROTOBUF_LIBRARIES} ${SNAPPY_LIBRARIES})
list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS} ${SNAPPY_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
list(APPEND NETDATA_COMMON_CFLAGS ${PROTOBUF_CFLAGS_OTHER} ${SNAPPY_CFLAGS_OTHER})
ELSE()
message(STATUS "prometheus remote write backend: disabled (requires protobuf and snappy libraries)")
ENDIF()
# -----------------------------------------------------------------------------
# netdata
@@ -648,7 +697,7 @@ ELSEIF(MACOS)
ENDIF()
IF(ENABLE_BACKEND_KINESIS)
IF(ENABLE_BACKEND_KINESIS OR ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
set_property(TARGET netdata PROPERTY CXX_STANDARD 11)
set_property(TARGET netdata PROPERTY CMAKE_CXX_STANDARD_REQUIRED ON)
ENDIF()

Makefile.am

@@ -441,6 +441,12 @@ KINESIS_BACKEND_FILES = \
backends/aws_kinesis/aws_kinesis_put_record.h \
$(NULL)
PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \
backends/prometheus/remote_write/remote_write.cc \
backends/prometheus/remote_write/remote_write.h \
backends/prometheus/remote_write/remote_write.proto \
$(NULL)
DAEMON_FILES = \
daemon/common.c \
daemon/common.h \
@@ -505,14 +511,13 @@ NETDATA_COMMON_LIBS = \
$(OPTIONAL_JUDY_LIBS) \
$(OPTIONAL_SSL_LIBS) \
$(NULL)
# TODO: Find more graceful way to add libs for AWS Kinesis
sbin_PROGRAMS += netdata
netdata_SOURCES = $(NETDATA_FILES)
netdata_LDADD = \
$(NETDATA_COMMON_LIBS) \
$(NULL)
if ENABLE_BACKEND_KINESIS
if ENABLE_CXX_LINKER
netdata_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@
else
netdata_LINK = $(CCLD) $(CFLAGS) $(LDFLAGS) -o $@
@@ -575,3 +580,18 @@ if ENABLE_BACKEND_KINESIS
netdata_SOURCES += $(KINESIS_BACKEND_FILES)
netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
endif
if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES)
netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
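# remote_write.pb.cc and remote_write.pb.h are generated from remote_write.proto
# by protoc at build time (see the rule below)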
BUILT_SOURCES = \
backends/prometheus/remote_write/remote_write.pb.cc \
backends/prometheus/remote_write/remote_write.pb.h \
$(NULL)
nodist_netdata_SOURCES = $(BUILT_SOURCES)
backends/prometheus/remote_write/remote_write.pb.cc \
backends/prometheus/remote_write/remote_write.pb.h: backends/prometheus/remote_write/remote_write.proto
$(PROTOC) --proto_path=$(srcdir) --cpp_out=$(builddir) $^
endif

backends/README.md

@@ -32,6 +32,12 @@ X seconds (though, it can send them per second if you need it to).
- **prometheus** is described at [prometheus page](prometheus/) since it pulls data from netdata.
- **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by
many [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage))
metrics are labeled in the same format that Netdata uses for the [plaintext prometheus protocol](prometheus/).
Notes on using the remote write backend are [here](prometheus/remote_write/).
- **AWS Kinesis Data Streams**
metrics are sent to the service in `JSON` format.
@@ -70,7 +76,7 @@ of `netdata.conf` from your netdata):
```
[backend]
enabled = yes | no
type = graphite | opentsdb | json | kinesis
type = graphite | opentsdb | json | prometheus_remote_write | kinesis
host tags = list of TAG=VALUE
destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used, or a region for kinesis
data source = average | sum | as collected

backends/backends.c

@@ -260,6 +260,12 @@ void *backends_main(void *ptr) {
char *kinesis_auth_key_id = NULL, *kinesis_secure_key = NULL, *kinesis_stream_name = NULL;
#endif
#if ENABLE_PROMETHEUS_REMOTE_WRITE
int do_prometheus_remote_write = 0;
BUFFER *http_request_header = buffer_create(1);
#endif
// ------------------------------------------------------------------------
// collect configuration options
@@ -285,6 +291,10 @@ void *backends_main(void *ptr) {
charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
hosts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
#if ENABLE_PROMETHEUS_REMOTE_WRITE
const char *remote_write_path = config_get(CONFIG_SECTION_BACKEND, "remote write URL path", "/receive");
#endif
// ------------------------------------------------------------------------
// validate configuration options
// and prepare for sending data to our backend
@@ -337,9 +347,8 @@ void *backends_main(void *ptr) {
backend_request_formatter = format_dimension_stored_json_plaintext;
}
#if HAVE_KINESIS
else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
#if HAVE_KINESIS
do_kinesis = 1;
if(unlikely(read_kinesis_conf(netdata_configured_user_config_dir, &kinesis_auth_key_id, &kinesis_secure_key, &kinesis_stream_name))) {
@@ -354,15 +363,31 @@ void *backends_main(void *ptr) {
backend_request_formatter = format_dimension_collected_json_plaintext;
else
backend_request_formatter = format_dimension_stored_json_plaintext;
}
#else
error("AWS Kinesis support isn't compiled");
#endif /* HAVE_KINESIS */
}
else if (!strcmp(type, "prometheus_remote_write")) {
#if ENABLE_PROMETHEUS_REMOTE_WRITE
do_prometheus_remote_write = 1;
backend_response_checker = process_prometheus_remote_write_response;
init_write_request();
#else
error("Prometheus remote write support isn't compiled");
#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
}
else {
error("BACKEND: Unknown backend type '%s'", type);
goto cleanup;
}
#if ENABLE_PROMETHEUS_REMOTE_WRITE
if((backend_request_formatter == NULL && !do_prometheus_remote_write) || backend_response_checker == NULL) {
#else
if(backend_request_formatter == NULL || backend_response_checker == NULL) {
#endif
error("BACKEND: backend is misconfigured - disabling it.");
goto cleanup;
}
@@ -451,6 +476,9 @@ void *backends_main(void *ptr) {
size_t count_charts_total = 0;
size_t count_dims_total = 0;
#if ENABLE_PROMETHEUS_REMOTE_WRITE
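// drop the time series accumulated during the previous iteration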
clear_write_request();
#endif
rrd_rdlock();
RRDHOST *host;
rrdhost_foreach_read(host) {
@@ -478,26 +506,45 @@ void *backends_main(void *ptr) {
const char *__hostname = (host == localhost)?hostname:host->hostname;
RRDSET *st;
rrdset_foreach_read(st, host) {
if(likely(backends_can_send_rrdset(global_backend_options, st))) {
rrdset_rdlock(st);
#if ENABLE_PROMETHEUS_REMOTE_WRITE
if(do_prometheus_remote_write) {
rrd_stats_remote_write_allmetrics_prometheus(
host
, __hostname
, global_backend_prefix
, global_backend_options
, after
, before
, &count_charts
, &count_dims
, &count_dims_skipped
);
chart_buffered_metrics += count_dims;
}
else
#endif
{
RRDSET *st;
rrdset_foreach_read(st, host) {
if(likely(backends_can_send_rrdset(global_backend_options, st))) {
rrdset_rdlock(st);
count_charts++;
count_charts++;
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if (likely(rd->last_collected_time.tv_sec >= after)) {
chart_buffered_metrics += backend_request_formatter(b, global_backend_prefix, host, __hostname, st, rd, after, before, global_backend_options);
count_dims++;
}
else {
debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
count_dims_skipped++;
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if (likely(rd->last_collected_time.tv_sec >= after)) {
chart_buffered_metrics += backend_request_formatter(b, global_backend_prefix, host, __hostname, st, rd, after, before, global_backend_options);
count_dims++;
}
else {
debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
count_dims_skipped++;
}
}
rrdset_unlock(st);
}
rrdset_unlock(st);
}
}
@@ -672,6 +719,43 @@ void *backends_main(void *ptr) {
flags += MSG_NOSIGNAL;
#endif
#if ENABLE_PROMETHEUS_REMOTE_WRITE
if(do_prometheus_remote_write) {
size_t data_size = get_write_request_size();
if(unlikely(!data_size)) {
error("BACKEND: write request size is out of range");
continue;
}
buffer_flush(b);
buffer_need_bytes(b, data_size);
if(unlikely(pack_write_request(b->buffer, &data_size))) {
error("BACKEND: cannot pack write request");
continue;
}
b->len = data_size;
chart_buffered_bytes = (collected_number)buffer_strlen(b);
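// build the HTTP POST header; the snappy-compressed protobuf payload in b is sent right after it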
buffer_flush(http_request_header);
buffer_sprintf(http_request_header,
"POST %s HTTP/1.1\r\n"
"Host: %s\r\n"
"Accept: */*\r\n"
"Content-Length: %zu\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n\r\n",
remote_write_path,
hostname,
data_size
);
len = buffer_strlen(http_request_header);
send(sock, buffer_tostring(http_request_header), len, flags);
len = data_size;
}
#endif
ssize_t written = send(sock, buffer_tostring(b), len, flags);
// chart_backend_latency += now_monotonic_usec() - start_ut;
if(written != -1 && (size_t)written == len) {
@@ -711,6 +795,16 @@ void *backends_main(void *ptr) {
}
}
#if ENABLE_PROMETHEUS_REMOTE_WRITE
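// the write request is rebuilt on every iteration, so a failed send cannot be buffered and retried; the data is counted as lost immediately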
if(failures) {
(void) buffer_on_failures;
failures = 0;
chart_lost_bytes = chart_buffered_bytes = get_write_request_size(); // estimated write request size
chart_data_lost_events++;
chart_lost_metrics = chart_buffered_metrics;
}
#else
if(failures > buffer_on_failures) {
// too bad! we are going to lose data
chart_lost_bytes += buffer_strlen(b);
@@ -720,6 +814,7 @@ void *backends_main(void *ptr) {
chart_data_lost_events++;
chart_lost_metrics = chart_buffered_metrics;
}
#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
if(unlikely(netdata_exit)) break;
@@ -775,6 +870,13 @@ cleanup:
}
#endif
#if ENABLE_PROMETHEUS_REMOTE_WRITE
if(do_prometheus_remote_write) {
buffer_free(http_request_header);
protocol_buffers_shutdown();
}
#endif
if(sock != -1)
close(sock);

backends/backends.h

@@ -53,4 +53,8 @@ extern int discard_response(BUFFER *b, const char *backend);
#include "backends/aws_kinesis/aws_kinesis.h"
#endif
#if ENABLE_PROMETHEUS_REMOTE_WRITE
#include "backends/prometheus/remote_write/remote_write.h"
#endif
#endif /* NETDATA_BACKENDS_H */

backends/prometheus/Makefile.am

@@ -3,6 +3,10 @@
AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
SUBDIRS = \
remote_write \
$(NULL)
dist_noinst_DATA = \
README.md \
$(NULL)

backends/prometheus/backend_prometheus.c

@@ -153,6 +153,8 @@ static inline char *prometheus_units_copy(char *d, const char *s, size_t usable,
#define PROMETHEUS_LABELS_MAX 1024
#define PROMETHEUS_VARIABLE_MAX 256
#define PROMETHEUS_LABELS_MAX_NUMBER 128
struct host_variables_callback_options {
RRDHOST *host;
BUFFER *wb;
@@ -307,7 +309,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED);
int homogeneous = 1;
if(as_collected) {
if(rrdset_flag_check(st, RRDSET_FLAG_HOMEGENEOUS_CHECK))
if(rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
rrdset_update_heterogeneous_flag(st);
if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
@@ -537,6 +539,177 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
rrdhost_unlock(host);
}
#if ENABLE_PROMETHEUS_REMOTE_WRITE
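// Splits a host tags string into alternating name/value pointers. For example,
// tags like:    env=production,rack="13"
// yield:        words[0]="env", words[1]="production", words[2]="rack", words[3]="13"
// Quotes around a value are stripped; parsing stops at the first malformed pair.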
inline static void remote_write_split_words(char *str, char **words, int max_words) {
char *s = str;
int i = 0;
while(*s && i < max_words - 1) {
while(*s && isspace(*s)) s++; // skip spaces to the beginning of a tag name
if(*s)
words[i] = s;
while(*s && !isspace(*s) && *s != '=') s++; // find the end of the tag name
if(*s != '=') {
words[i] = NULL;
break;
}
*s = '\0';
s++;
i++;
while(*s && isspace(*s)) s++; // skip spaces to the beginning of a tag value
if(*s && *s == '"') s++; // strip an opening quote
if(*s)
words[i] = s;
while(*s && !isspace(*s) && *s != ',') s++; // find the end of the tag value
if(*s && *s != ',') {
words[i] = NULL;
break;
}
if(s != words[i] && *(s - 1) == '"') *(s - 1) = '\0'; // strip a closing quote
if(*s != '\0') {
*s = '\0';
s++;
i++;
}
}
}
void rrd_stats_remote_write_allmetrics_prometheus(
RRDHOST *host
, const char *__hostname
, const char *prefix
, BACKEND_OPTIONS backend_options
, time_t after
, time_t before
, size_t *count_charts
, size_t *count_dims
, size_t *count_dims_skipped
) {
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
add_host_info("netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
if(host->tags && *(host->tags)) {
char tags[PROMETHEUS_LABELS_MAX + 1];
strncpy(tags, host->tags, PROMETHEUS_LABELS_MAX);
char *words[PROMETHEUS_LABELS_MAX_NUMBER] = {NULL};
int i;
remote_write_split_words(tags, words, PROMETHEUS_LABELS_MAX_NUMBER);
add_host_info("netdata_host_tags_info", hostname, NULL, NULL, now_realtime_usec() / USEC_PER_MS);
for(i = 0; words[i] != NULL && words[i + 1] != NULL && (i + 1) < PROMETHEUS_LABELS_MAX_NUMBER; i += 2) {
add_tag(words[i], words[i + 1]);
}
}
// for each chart
RRDSET *st;
rrdset_foreach_read(st, host) {
char chart[PROMETHEUS_ELEMENT_MAX + 1];
char context[PROMETHEUS_ELEMENT_MAX + 1];
char family[PROMETHEUS_ELEMENT_MAX + 1];
char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
if(likely(backends_can_send_rrdset(backend_options, st))) {
rrdset_rdlock(st);
(*count_charts)++;
int as_collected = (BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED);
int homogeneous = 1;
if(as_collected) {
if(rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
rrdset_update_heterogeneous_flag(st);
if(rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
homogeneous = 0;
}
else {
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
}
// for each dimension
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
char name[PROMETHEUS_LABELS_MAX + 1];
char dimension[PROMETHEUS_ELEMENT_MAX + 1];
char *suffix = "";
if (as_collected) {
// we need as-collected / raw data
if(unlikely(rd->last_collected_time.tv_sec < after)) {
debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
(*count_dims_skipped)++;
continue;
}
if(homogeneous) {
// all the dimensions of the chart have the same algorithm, multiplier and divisor
// we add all dimensions as labels
prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", prefix, context, suffix);
add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
(*count_dims)++;
}
else {
// the dimensions of the chart do not have the same algorithm, multiplier or divisor
// we create a metric per dimension
prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", prefix, context, dimension, suffix);
add_metric(name, chart, family, NULL, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
(*count_dims)++;
}
}
else {
// we need average or sum of the data
time_t first_t = after, last_t = before;
calculated_number value = backend_calculate_value_from_stored_data(st, rd, after, before, backend_options, &first_t, &last_t);
if(!isnan(value) && !isinf(value)) {
if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
suffix = "_average";
else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
suffix = "_sum";
prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", prefix, context, units, suffix);
add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
(*count_dims)++;
}
}
}
}
rrdset_unlock(st);
}
}
}
#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
static inline time_t prometheus_preparation(RRDHOST *host, BUFFER *wb, BACKEND_OPTIONS backend_options, const char *server, time_t now, PROMETHEUS_OUTPUT_OPTIONS output_options) {
if(!server || !*server) server = "default";
@@ -599,3 +772,26 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFF
}
rrd_unlock();
}
#if ENABLE_PROMETHEUS_REMOTE_WRITE
int process_prometheus_remote_write_response(BUFFER *b) {
if(unlikely(!b)) return 1;
const char *s = buffer_tostring(b);
int len = buffer_strlen(b);
// do nothing with HTTP response 200
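// skip the protocol token (e.g. "HTTP/1.1") and check the status code that follows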
while(!isspace(*s) && len) {
s++;
len--;
}
s++;
len--;
if(likely(len > 4 && !strncmp(s, "200 ", 4)))
return 0;
else
return discard_response(b, "prometheus remote write");
}
#endif

backends/prometheus/backend_prometheus.h

@@ -19,4 +19,19 @@ typedef enum prometheus_output_flags {
extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
#if ENABLE_PROMETHEUS_REMOTE_WRITE
extern void rrd_stats_remote_write_allmetrics_prometheus(
RRDHOST *host
, const char *__hostname
, const char *prefix
, BACKEND_OPTIONS backend_options
, time_t after
, time_t before
, size_t *count_charts
, size_t *count_dims
, size_t *count_dims_skipped
);
extern int process_prometheus_remote_write_response(BUFFER *b);
#endif
#endif //NETDATA_BACKEND_PROMETHEUS_H

backends/prometheus/remote_write/Makefile.am

@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-3.0-or-later
AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
remote_write.pb.cc \
remote_write.pb.h \
$(NULL)
dist_noinst_DATA = \
remote_write.proto \
README.md \
$(NULL)

backends/prometheus/remote_write/README.md

@@ -0,0 +1,30 @@
# Prometheus remote write backend
## Prerequisites
To use the prometheus remote write API with [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), the [protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries should be installed first. Next, Netdata should be reinstalled from source. The installer will detect that the required libraries and utilities are now available.
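On Debian or Ubuntu, for example, the dependencies can typically be installed from distribution packages (package names vary across distributions), after which the installer can be re-run:
```
sudo apt-get install g++ libprotobuf-dev libsnappy-dev protobuf-compiler
./netdata-installer.sh --enable-backend-prometheus-remote-write
```
The `--enable-backend-prometheus-remote-write` flag is optional here, since the installer autodetects the libraries by default.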
## Configuration
An additional option in the backend configuration section is available for the remote write backend:
```
[backend]
remote write URL path = /receive
```
`remote write URL path` sets the endpoint path for the remote write protocol; the default value is `/receive`. For example, if your endpoint is `http://example.domain:example_port/storage/read`, you should set
```
[backend]
destination = example.domain:example_port
remote write URL path = /storage/read
```
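Putting the options together, a minimal `[backend]` section for this example endpoint would be (the hostname and port are placeholders taken from the example above):
```
[backend]
enabled = yes
type = prometheus_remote_write
destination = example.domain:example_port
remote write URL path = /storage/read
```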
The `buffered` and `lost` dimensions in the Netdata Backend Data Size operation monitoring chart estimate uncompressed buffer size on failures.
## Notes
The remote write backend does not support the `buffer on failures` option.

backends/prometheus/remote_write/remote_write.cc

@@ -0,0 +1,117 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include <snappy.h>
#include "remote_write.pb.h"
#include "remote_write.h"
using namespace prometheus;
google::protobuf::Arena arena;
WriteRequest *write_request;
void init_write_request() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
write_request = google::protobuf::Arena::CreateMessage<WriteRequest>(&arena);
}
void clear_write_request() {
write_request->clear_timeseries();
}
void add_host_info(const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp) {
TimeSeries *timeseries;
Sample *sample;
Label *label;
timeseries = write_request->add_timeseries();
label = timeseries->add_labels();
label->set_name("__name__");
label->set_value(name);
label = timeseries->add_labels();
label->set_name("instance");
label->set_value(instance);
if(application) {
label = timeseries->add_labels();
label->set_name("application");
label->set_value(application);
}
if(version) {
label = timeseries->add_labels();
label->set_name("version");
label->set_value(version);
}
sample = timeseries->add_samples();
sample->set_value(1);
sample->set_timestamp(timestamp);
}
// adds tag to the last created timeseries
void add_tag(char *tag, char *value) {
TimeSeries *timeseries;
Label *label;
timeseries = write_request->mutable_timeseries(write_request->timeseries_size() - 1);
label = timeseries->add_labels();
label->set_name(tag);
label->set_value(value);
}
void add_metric(const char *name, const char *chart, const char *family, const char *dimension, const char *instance, const double value, const int64_t timestamp) {
TimeSeries *timeseries;
Sample *sample;
Label *label;
timeseries = write_request->add_timeseries();
label = timeseries->add_labels();
label->set_name("__name__");
label->set_value(name);
label = timeseries->add_labels();
label->set_name("chart");
label->set_value(chart);
label = timeseries->add_labels();
label->set_name("family");
label->set_value(family);
if(dimension) {
label = timeseries->add_labels();
label->set_name("dimension");
label->set_value(dimension);
}
label = timeseries->add_labels();
label->set_name("instance");
label->set_value(instance);
sample = timeseries->add_samples();
sample->set_value(value);
sample->set_timestamp(timestamp);
}
size_t get_write_request_size(){
size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSize());
return (size < INT_MAX)?size:0;
}
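// Packs the write request into `buffer`, which must hold at least
// get_write_request_size() bytes; on success, *size is set to the actual
// compressed length written by snappy.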
int pack_write_request(char *buffer, size_t *size) {
std::string uncompressed_write_request;
if(write_request->SerializeToString(&uncompressed_write_request) == false) return 1;
snappy::RawCompress(uncompressed_write_request.data(), uncompressed_write_request.size(), buffer, size);
return 0;
}
void protocol_buffers_shutdown() {
google::protobuf::ShutdownProtobufLibrary();
}

backends/prometheus/remote_write/remote_write.h

@@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
#define NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H
#ifdef __cplusplus
extern "C" {
#endif
void init_write_request();
void clear_write_request();
void add_host_info(const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
void add_tag(char *tag, char *value);
void add_metric(const char *name, const char *chart, const char *family, const char *dimension, const char *instance, const double value, const int64_t timestamp);
size_t get_write_request_size();
int pack_write_request(char *buffer, size_t *size);
void protocol_buffers_shutdown();
#ifdef __cplusplus
}
#endif
#endif //NETDATA_BACKEND_PROMETHEUS_REMOTE_WRITE_H

backends/prometheus/remote_write/remote_write.proto

@@ -0,0 +1,29 @@
syntax = "proto3";
package prometheus;
option cc_enable_arenas = true;
import "google/protobuf/descriptor.proto";
message WriteRequest {
repeated TimeSeries timeseries = 1 [(nullable) = false];
}
message TimeSeries {
repeated Label labels = 1 [(nullable) = false];
repeated Sample samples = 2 [(nullable) = false];
}
message Label {
string name = 1;
string value = 2;
}
message Sample {
double value = 1;
int64 timestamp = 2;
}
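// Minimal stand-in for the gogoproto `nullable` option used by the upstream
// Prometheus proto definitions; protoc's C++ generator ignores it, but it must
// be defined for the [(nullable) = false] annotations above to parse.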
extend google.protobuf.FieldOptions {
bool nullable = 65001;
}

configure.ac

@@ -76,7 +76,12 @@ AC_ARG_ENABLE(
,
[enable_backend_kinesis="detect"]
)
AC_ARG_ENABLE(
[backend-prometheus-remote-write],
[AS_HELP_STRING([--enable-backend-prometheus-remote-write], [enable prometheus remote write backend @<:@default autodetect@:>@])],
,
[enable_backend_prometheus_remote_write="detect"]
)
AC_ARG_ENABLE(
[pedantic],
[AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
@@ -791,6 +796,65 @@ AC_MSG_RESULT([${enable_backend_kinesis}])
AM_CONDITIONAL([ENABLE_BACKEND_KINESIS], [test "${enable_backend_kinesis}" = "yes"])
# -----------------------------------------------------------------------------
# Prometheus remote write backend - libprotobuf, libsnappy, protoc
PKG_CHECK_MODULES(
[PROTOBUF],
[protobuf],
[have_libprotobuf=yes],
[have_libprotobuf=no]
)
PKG_CHECK_MODULES(
[SNAPPY],
[snappy],
[have_libsnappy=yes],
[have_libsnappy=no]
)
AC_PATH_PROG([PROTOC], [protoc], [no])
AS_IF(
[test x"${PROTOC}" == x"no"],
[have_protoc=no],
[have_protoc=yes]
)
AC_PATH_PROG([CXX_BINARY], [${CXX}], [no])
AS_IF(
[test x"${CXX_BINARY}" == x"no"],
[have_CXX_compiler=no],
[have_CXX_compiler=yes]
)
test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libprotobuf}" != "yes" && \
AC_MSG_ERROR([libprotobuf required but not found. try installing protobuf])
test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libsnappy}" != "yes" && \
AC_MSG_ERROR([libsnappy required but not found. try installing snappy])
test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_protoc}" != "yes" && \
AC_MSG_ERROR([protoc compiler required but not found. try installing protobuf])
test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_CXX_compiler}" != "yes" && \
AC_MSG_ERROR([C++ compiler required but not found. try installing g++])
AC_MSG_CHECKING([if prometheus remote write backend should be enabled])
if test "${enable_backend_prometeus_remote_write}" != "no" -a "${have_libprotobuf}" = "yes" -a "${have_libsnappy}" = "yes" \
-a "${have_protoc}" = "yes" -a "${have_CXX_compiler}" = "yes"; then
enable_backend_prometheus_remote_write="yes"
AC_DEFINE([ENABLE_PROMETHEUS_REMOTE_WRITE], [1], [Prometheus remote write API usability])
OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${PROTOBUF_CFLAGS} ${SNAPPY_CFLAGS}"
CXX11FLAG="-std=c++11"
OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS} "
else
enable_backend_prometheus_remote_write="no"
fi
AC_MSG_RESULT([${enable_backend_prometheus_remote_write}])
AM_CONDITIONAL([ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE], [test "${enable_backend_prometheus_remote_write}" = "yes"])
# -----------------------------------------------------------------------------
# check for setns() - cgroup-network
@@ -815,7 +879,7 @@ if test "${enable_lto}" != "no"; then
fi
if test "${have_lto}" = "yes"; then
oCFLAGS="${CFLAGS}"
CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} ${OPTIONAL_KINESIS_CFLAGS}"
CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS}"
ac_cv_c_lto_cross_compile="${enable_lto}"
test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
AC_C_LTO
@@ -836,6 +900,8 @@ AC_MSG_RESULT([${enable_lto}])
# -----------------------------------------------------------------------------
AM_CONDITIONAL([ENABLE_CXX_LINKER], [test "${enable_backend_kinesis}" = "yes" -o "${enable_backend_prometheus_remote_write}" = "yes"])
AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileged])
varlibdir="${localstatedir}/lib/netdata"
@@ -898,6 +964,8 @@ AC_SUBST([OPTIONAL_XENSTAT_CFLAGS])
AC_SUBST([OPTIONAL_XENSTAT_LIBS])
AC_SUBST([OPTIONAL_KINESIS_CFLAGS])
AC_SUBST([OPTIONAL_KINESIS_LIBS])
AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS])
AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS])
AC_CONFIG_FILES([
@@ -908,6 +976,7 @@ AC_CONFIG_FILES([
backends/Makefile
backends/opentsdb/Makefile
backends/prometheus/Makefile
backends/prometheus/remote_write/Makefile
backends/aws_kinesis/Makefile
collectors/Makefile
collectors/apps.plugin/Makefile

database/rrd.h

@@ -351,7 +351,7 @@ typedef enum rrdset_flags {
RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata master (streaming)
RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
RRDSET_FLAG_HOMEGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions as homogeneous
RRDSET_FLAG_HOMOGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions are homogeneous
RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
RRDSET_FLAG_OBSOLETE_DIMENSIONS = 1 << 14 // this is marked by the collector/module when a chart has obsolete dimensions

database/rrddim.c

@@ -60,7 +60,7 @@ inline int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm)
debug(D_RRD_CALLS, "Updating algorithm of dimension '%s/%s' from %s to %s", st->id, rd->name, rrd_algorithm_name(rd->algorithm), rrd_algorithm_name(algorithm));
rd->algorithm = algorithm;
rd->exposed = 0;
rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
return 1;
}
@@ -72,7 +72,7 @@ inline int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multip
debug(D_RRD_CALLS, "Updating multiplier of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->multiplier, multiplier);
rd->multiplier = multiplier;
rd->exposed = 0;
rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
return 1;
}
@@ -84,7 +84,7 @@ inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor)
debug(D_RRD_CALLS, "Updating divisor of dimension '%s/%s' from " COLLECTED_NUMBER_FORMAT " to " COLLECTED_NUMBER_FORMAT, st->id, rd->name, rd->divisor, divisor);
rd->divisor = divisor;
rd->exposed = 0;
rrdset_flag_set(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
rrdset_flag_set(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED);
return 1;
}

database/rrdset.c

@@ -210,7 +210,7 @@ inline void rrdset_update_heterogeneous_flag(RRDSET *st) {
RRDDIM *rd;
rrdset_flag_clear(st, RRDSET_FLAG_HOMEGENEOUS_CHECK);
rrdset_flag_clear(st, RRDSET_FLAG_HOMOGENEOUS_CHECK);
RRD_ALGORITHM algorithm = st->dimensions->algorithm;
collected_number multiplier = abs(st->dimensions->multiplier);

libnetdata/socket/socket.c

@@ -819,11 +819,15 @@ int connect_to_one_of(const char *destination, int default_port, struct timeval
while(*s) {
const char *e = s;
// skip path, moving both s(tart) and e(nd)
if(*e == '/')
while(!isspace(*e) && *e != ',') s = ++e;
// skip separators, moving both s(tart) and e(nd)
while(isspace(*e) || *e == ',') s = ++e;
// move e(nd) to the first separator
while(*e && !isspace(*e) && *e != ',') e++;
while(*e && !isspace(*e) && *e != ',' && *e != '/') e++;
// is there anything?
if(!*s || s == e) break;

netdata-installer.sh

@@ -165,6 +165,9 @@ USAGE: ${PROGRAM} [options]
--enable-backend-kinesis Enable AWS Kinesis backend. Default: enable it when libaws_cpp_sdk_kinesis and libraries
it depends on are available.
--disable-backend-kinesis
--enable-backend-prometheus-remote-write Enable Prometheus remote write backend. Default: enable it when libprotobuf and
libsnappy are available.
--disable-backend-prometheus-remote-write
--enable-lto Enable Link-Time-Optimization. Default: enabled
--disable-lto
--disable-x86-sse Disable SSE instructions. By default SSE optimizations are enabled.
@@ -212,6 +215,8 @@ while [ -n "${1}" ]; do
"--disable-plugin-xenstat") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-plugin-xenstat/} --disable-plugin-xenstat";;
"--enable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-kinesis/} --enable-backend-kinesis";;
"--disable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-kinesis/} --disable-backend-kinesis";;
"--enable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-prometheus-remote-write/} --enable-backend-prometheus-remote-write";;
"--disable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-prometheus-remote-write/} --disable-backend-prometheus-remote-write";;
"--enable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto";;
"--disable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto";;
"--disable-x86-sse") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse";;