diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6c5254b6e6..ca0ab99946 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1112,6 +1112,8 @@ endif()
         -Wl,--wrap=recv
         -Wl,--wrap=send
         -Wl,--wrap=connect_to_one_of
+        -Wl,--wrap=create_main_rusage_chart
+        -Wl,--wrap=send_main_rusage
         ${PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS}
         ${KINESIS_LINK_OPTIONS}
         ${MONGODB_LINK_OPTIONS}
diff --git a/Makefile.am b/Makefile.am
index e4e240cbda..be84fc09ac 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -573,6 +573,7 @@ NETDATA_FILES = \
     $(LIBNETDATA_FILES) \
     $(API_PLUGIN_FILES) \
     $(BACKENDS_PLUGIN_FILES) \
+    $(EXPORTING_ENGINE_FILES) \
     $(CHECKS_PLUGIN_FILES) \
     $(HEALTH_PLUGIN_FILES) \
     $(IDLEJITTER_PLUGIN_FILES) \
@@ -608,12 +609,6 @@ if LINUX
 
 endif
 
-if ENABLE_EXPORTING
-    NETDATA_FILES += \
-        $(EXPORTING_ENGINE_FILES) \
-        $(NULL)
-endif
-
 NETDATA_COMMON_LIBS = \
     $(OPTIONAL_MATH_LIBS) \
     $(OPTIONAL_ZLIB_LIBS) \
@@ -745,23 +740,13 @@ if ENABLE_PLUGIN_SLABINFO
         $(NULL)
 endif
 
-if ENABLE_EXPORTING
 if ENABLE_BACKEND_KINESIS
-    netdata_SOURCES += $(KINESIS_EXPORTING_FILES)
-    netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
-endif
-endif
-
-if ENABLE_BACKEND_KINESIS
-    netdata_SOURCES += $(KINESIS_BACKEND_FILES)
+    netdata_SOURCES += $(KINESIS_BACKEND_FILES) $(KINESIS_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
 endif
 
 if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
-if ENABLE_EXPORTING
-    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
-endif
-    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES)
+    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
     BUILT_SOURCES = \
         exporting/prometheus/remote_write/remote_write.pb.cc \
@@ -775,15 +760,8 @@ exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote
 
 endif
 
-if ENABLE_EXPORTING
 if ENABLE_BACKEND_MONGODB
-    netdata_SOURCES += $(MONGODB_EXPORTING_FILES)
-    netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
-endif
-endif
-
-if ENABLE_BACKEND_MONGODB
-    netdata_SOURCES += $(MONGODB_BACKEND_FILES)
+    netdata_SOURCES += $(MONGODB_BACKEND_FILES) $(MONGODB_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
 endif
 
@@ -895,6 +873,8 @@ if ENABLE_UNITTESTS
         -Wl,--wrap=recv \
         -Wl,--wrap=send \
         -Wl,--wrap=connect_to_one_of \
+        -Wl,--wrap=create_main_rusage_chart \
+        -Wl,--wrap=send_main_rusage \
         $(TEST_LDFLAGS) \
         $(NULL)
     exporting_tests_exporting_engine_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
diff --git a/README.md b/README.md
index 2fd1ff70cc..ea93bcc6d7 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Netdata"
+date: 2020-04-06
 custom_edit_url: https://github.com/netdata/netdata/edit/master/README.md
 ---
 -->
@@ -33,7 +34,7 @@ granularity. Run this long-term storage autonomously, or integrate Netdata with
 Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** and **virtual** servers,
 **containers**, **IoT** devices), without disrupting their core function.
 
-Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **MacOS**, along with
+Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **macOS**, along with
 other systems derived from them, such as **Kubernetes** and **Docker**.
 
 Netdata is not hosted by the CNCF but is the 3rd most starred open-source project in the [Cloud Native Computing
diff --git a/aclk/agent_cloud_link.c b/aclk/agent_cloud_link.c
index 1adaf6bcce..a41d17e7bd 100644
--- a/aclk/agent_cloud_link.c
+++ b/aclk/agent_cloud_link.c
@@ -23,6 +23,8 @@ static char *aclk_password = NULL;
 static char *global_base_topic = NULL;
 static int aclk_connecting = 0;
 int aclk_connected = 0;             // Exposed in the web-api
+usec_t aclk_session_us = 0;         // Used by the mqtt layer
+time_t aclk_session_sec = 0;        // Used by the mqtt layer
 
 static netdata_mutex_t aclk_mutex = NETDATA_MUTEX_INITIALIZER;
 static netdata_mutex_t query_mutex = NETDATA_MUTEX_INITIALIZER;
@@ -185,7 +187,7 @@ biofailed:
  * should be called with
  *
  * mode 0 to reset the delay
- * mode 1 to sleep for the calculated amount of time [0 .. ACLK_MAX_BACKOFF_DELAY * 1000] ms
+ * mode 1 to calculate sleep time [0 .. ACLK_MAX_BACKOFF_DELAY * 1000] ms
  *
  */
 unsigned long int aclk_reconnect_delay(int mode)
@@ -208,8 +210,6 @@ unsigned long int aclk_reconnect_delay(int mode)
         delay = (delay * 1000) + (random() % 1000);
     }
 
-    //    sleep_usec(USEC_PER_MS * delay);
-
     return delay;
 }
 
@@ -306,7 +306,7 @@ int aclk_queue_query(char *topic, char *data, char *msg_id, char *query, int run
         if (tmp_query->run_after == run_after) {
             QUERY_UNLOCK;
             QUERY_THREAD_WAKEUP;
-            return 1;
+            return 0;
         }
 
         if (last_query)
@@ -750,8 +750,8 @@ int aclk_execute_query(struct aclk_query *this_query)
         buffer_flush(local_buffer);
         local_buffer->contenttype = CT_APPLICATION_JSON;
 
-        aclk_create_header(local_buffer, "http", this_query->msg_id);
-
+        aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0);
+        buffer_strcat(local_buffer, ",\n\t\"payload\": ");
         char *encoded_response = aclk_encode_response(w->response.data);
 
         buffer_sprintf(
@@ -821,11 +821,6 @@ int aclk_process_query()
             aclk_send_message(this_query->topic, this_query->query, this_query->msg_id);
             break;
 
-        case ACLK_CMD_ALARMS:
-            debug(D_ACLK, "EXECUTING an alarms update command");
-            aclk_send_alarm_metadata();
-            break;
-
         case ACLK_CMD_CLOUD:
             debug(D_ACLK, "EXECUTING a cloud command");
             aclk_execute_query(this_query);
@@ -868,18 +863,22 @@ int aclk_process_queries()
 static void aclk_query_thread_cleanup(void *ptr)
 {
     struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
 
     info("cleaning up...");
 
-    COLLECTOR_LOCK;
-
     _reset_collector_list();
     freez(collector_list);
 
-    COLLECTOR_UNLOCK;
+    // Clean memory for pending queries if any
+    struct aclk_query *this_query;
 
-    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+    do {
+        this_query = aclk_queue_pop();
+        aclk_query_free(this_query);
+    } while (this_query);
+
+    freez(static_thread->thread);
+    freez(static_thread);
 }
 
 /**
@@ -916,7 +915,7 @@ void *aclk_query_main_thread(void *ptr)
             if (unlikely(aclk_queue_query("on_connect", NULL, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) {
                 errno = 0;
                 error("ACLK failed to queue on_connect command");
-                aclk_metadata_submitted = 0;
+                aclk_metadata_submitted = ACLK_METADATA_REQUIRED;
             }
         }
 
@@ -939,7 +938,6 @@ void *aclk_query_main_thread(void *ptr)
 // Thread cleanup
 static void aclk_main_cleanup(void *ptr)
 {
-    char payload[512];
     struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
     static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
 
@@ -952,24 +950,11 @@ static void aclk_main_cleanup(void *ptr)
         // Wakeup thread to cleanup
         QUERY_THREAD_WAKEUP;
         // Send a graceful disconnect message
-        char *msg_id = create_uuid();
-
-        usec_t time_created_offset_usec = now_realtime_usec();
-        time_t time_created = time_created_offset_usec / USEC_PER_SEC;
-        time_created_offset_usec = time_created_offset_usec % USEC_PER_SEC;
-
-        snprintfz(
-            payload, 511,
-            "{ \"type\": \"disconnect\","
-            " \"msg-id\": \"%s\","
-            " \"timestamp\": %ld,"
-            " \"timestamp-offset-usec\": %llu,"
-            " \"version\": %d,"
-            " \"payload\": \"graceful\" }",
-            msg_id, time_created, time_created_offset_usec, ACLK_VERSION);
-
-        aclk_send_message(ACLK_METADATA_TOPIC, payload, msg_id);
-        freez(msg_id);
+        BUFFER *b = buffer_create(512);
+        aclk_create_header(b, "disconnect", NULL, 0, 0);
+        buffer_strcat(b, ",\n\t\"payload\": \"graceful\"}\n");
+        aclk_send_message(ACLK_METADATA_TOPIC, (char*)buffer_tostring(b), NULL);
+        buffer_free(b);
 
         event_loop_timeout = now_realtime_sec() + 5;
         write_q = 1;
@@ -990,7 +975,6 @@ static void aclk_main_cleanup(void *ptr)
         }
     }
 
-    info("Disconnected");
 
     static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
 }
@@ -1295,7 +1279,6 @@ void *aclk_main(void *ptr)
 {
     struct netdata_static_thread *query_thread;
 
-    netdata_thread_cleanup_push(aclk_main_cleanup, ptr);
     if (!netdata_cloud_setting) {
         info("Killing ACLK thread -> cloud functionality has been disabled");
         return NULL;
@@ -1335,10 +1318,11 @@ void *aclk_main(void *ptr)
         sleep_usec(USEC_PER_SEC * 60);
     }
     create_publish_base_topic();
-    create_private_key();
 
     usec_t reconnect_expiry = 0; // In usecs
 
+    netdata_thread_disable_cancelability();
+
     while (!netdata_exit) {
         static int first_init = 0;
         size_t write_q, write_q_bytes, read_q;
@@ -1392,7 +1376,8 @@ void *aclk_main(void *ptr)
         }
     } // forever
 exited:
-    aclk_shutdown();
+    // Wakeup query thread to cleanup
+    QUERY_THREAD_WAKEUP;
 
     freez(aclk_username);
     freez(aclk_password);
@@ -1401,7 +1386,7 @@ exited:
     if (aclk_private_key != NULL)
         RSA_free(aclk_private_key);
 
-    netdata_thread_cleanup_pop(1);
+    aclk_main_cleanup(ptr);
     return NULL;
 }
 
@@ -1514,7 +1499,7 @@ void aclk_shutdown()
     info("Shutdown complete");
 }
 
-inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id)
+inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us)
 {
     uuid_t uuid;
     char uuid_str[36 + 1];
@@ -1525,9 +1510,11 @@ inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id)
         msg_id = uuid_str;
     }
 
-    usec_t time_created_offset_usec = now_realtime_usec();
-    time_t time_created = time_created_offset_usec / USEC_PER_SEC;
-    time_created_offset_usec = time_created_offset_usec % USEC_PER_SEC;
+    if (ts_secs == 0) {
+        ts_us = now_realtime_usec();
+        ts_secs = ts_us / USEC_PER_SEC;
+        ts_us = ts_us % USEC_PER_SEC;
+    }
 
     buffer_sprintf(
         dest,
@@ -1535,11 +1522,12 @@ inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id)
         "\t\"msg-id\": \"%s\",\n"
         "\t\"timestamp\": %ld,\n"
         "\t\"timestamp-offset-usec\": %llu,\n"
-        "\t\"version\": %d,\n"
-        "\t\"payload\": ",
-        type, msg_id, time_created, time_created_offset_usec, ACLK_VERSION);
+        "\t\"connect\": %ld,\n"
+        "\t\"connect-offset-usec\": %llu,\n"
+        "\t\"version\": %d",
+        type, msg_id, ts_secs, ts_us, aclk_session_sec, aclk_session_us, ACLK_VERSION);
 
-    debug(D_ACLK, "Sending v%d msgid [%s] type [%s] time [%ld]", ACLK_VERSION, msg_id, type, time_created);
+    debug(D_ACLK, "Sending v%d msgid [%s] type [%s] time [%ld]", ACLK_VERSION, msg_id, type, ts_secs);
 }
 
 /*
@@ -1599,7 +1587,15 @@ void aclk_send_alarm_metadata()
 
     debug(D_ACLK, "Metadata alarms start");
 
-    aclk_create_header(local_buffer, "connect_alarms", msg_id);
+    // on_connect messages are sent on a health reload. If the on_connect message is real, then we
+    // use the session time as the fake timestamp to indicate that it starts the session. If it is
+    // a fake on_connect message, then use the real timestamp to indicate it is within the existing
+    // session.
+    if (aclk_metadata_submitted == ACLK_METADATA_SENT)
+        aclk_create_header(local_buffer, "connect_alarms", msg_id, 0, 0);
+    else
+        aclk_create_header(local_buffer, "connect_alarms", msg_id, aclk_session_sec, aclk_session_us);
+    buffer_strcat(local_buffer, ",\n\t\"payload\": ");
 
     buffer_sprintf(local_buffer, "{\n\t \"configured-alarms\" : ");
     health_alarms2json(localhost, local_buffer, 1);
@@ -1635,7 +1631,16 @@ int aclk_send_info_metadata()
     buffer_flush(local_buffer);
     local_buffer->contenttype = CT_APPLICATION_JSON;
 
-    aclk_create_header(local_buffer, "connect", msg_id);
+    // on_connect messages are sent on a health reload. If the on_connect message is real, then we
+    // use the session time as the fake timestamp to indicate that it starts the session. If it is
+    // a fake on_connect message, then use the real timestamp to indicate it is within the existing
+    // session.
+    if (aclk_metadata_submitted == ACLK_METADATA_SENT)
+        aclk_create_header(local_buffer, "connect", msg_id, 0, 0);
+    else
+        aclk_create_header(local_buffer, "connect", msg_id, aclk_session_sec, aclk_session_us);
+    buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+
     buffer_sprintf(local_buffer, "{\n\t \"info\" : ");
     web_client_api_request_v1_info_fill_buffer(localhost, local_buffer);
     debug(D_ACLK, "Metadata %s with info has %zu bytes", msg_id, local_buffer->len);
@@ -1728,7 +1733,9 @@ int aclk_send_single_chart(char *hostname, char *chart)
     buffer_flush(local_buffer);
     local_buffer->contenttype = CT_APPLICATION_JSON;
 
-    aclk_create_header(local_buffer, "chart", msg_id);
+    aclk_create_header(local_buffer, "chart", msg_id, 0, 0);
+    buffer_strcat(local_buffer, ",\n\t\"payload\": ");
+
     rrdset2json(st, local_buffer, NULL, NULL, 1);
     buffer_sprintf(local_buffer, "\t\n}");
 
@@ -1793,7 +1800,8 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae)
     char *msg_id = create_uuid();
 
     buffer_flush(local_buffer);
-    aclk_create_header(local_buffer, "status-change", msg_id);
+    aclk_create_header(local_buffer, "status-change", msg_id, 0, 0);
+    buffer_strcat(local_buffer, ",\n\t\"payload\": ");
 
     netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
     health_alarm_entry2json_nolock(local_buffer, ae, host);
@@ -1863,6 +1871,12 @@ int aclk_handle_cloud_request(char *payload)
         return 1;
     }
 
+    // Checked to be "http", not needed anymore
+    if (likely(cloud_to_agent.type_id)) {
+        freez(cloud_to_agent.type_id);
+        cloud_to_agent.type_id = NULL;
+    }
+
     if (unlikely(aclk_submit_request(&cloud_to_agent)))
         debug(D_ACLK, "ACLK failed to queue incoming message (%s)", payload);
 
diff --git a/aclk/agent_cloud_link.h b/aclk/agent_cloud_link.h
index faf4932f84..f147669e5d 100644
--- a/aclk/agent_cloud_link.h
+++ b/aclk/agent_cloud_link.h
@@ -44,7 +44,6 @@ typedef enum aclk_cmd {
     ACLK_CMD_CHART,
     ACLK_CMD_CHARTDEL,
     ACLK_CMD_ALARM,
-    ACLK_CMD_ALARMS,
     ACLK_CMD_MAX
 } ACLK_CMD;
 
@@ -74,16 +73,12 @@ void *aclk_main(void *ptr);
 
 extern int aclk_send_message(char *sub_topic, char *message, char *msg_id);
 
-//int     aclk_init();
-//char    *get_base_topic();
-
 extern char *is_agent_claimed(void);
 extern void aclk_lws_wss_mqtt_layer_disconect_notif();
 char *create_uuid();
 
 // callbacks for agent cloud link
 int aclk_subscribe(char *topic, int qos);
-void aclk_shutdown();
 int cloud_to_agent_parse(JSON_ENTRY *e);
 void aclk_disconnect();
 void aclk_connect();
@@ -98,7 +93,7 @@ struct aclk_query *
 aclk_query_find(char *token, char *data, char *msg_id, char *query, ACLK_CMD cmd, struct aclk_query **last_query);
 int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd);
 int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae);
-void aclk_create_header(BUFFER *dest, char *type, char *msg_id);
+void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us);
 int aclk_handle_cloud_request(char *payload);
 int aclk_submit_request(struct aclk_request *);
 void aclk_add_collector(const char *hostname, const char *plugin_name, const char *module_name);
diff --git a/aclk/mqtt.c b/aclk/mqtt.c
index dad32b578b..b070f7fb09 100644
--- a/aclk/mqtt.c
+++ b/aclk/mqtt.c
@@ -5,6 +5,9 @@
 #include "mqtt.h"
 #include "aclk_lws_wss_client.h"
 
+extern usec_t aclk_session_us;
+extern time_t aclk_session_sec;
+
 inline const char *_link_strerror(int rc)
 {
     return mosquitto_strerror(rc);
@@ -49,7 +52,12 @@ void disconnect_callback(struct mosquitto *mosq, void *obj, int rc)
     UNUSED(obj);
     UNUSED(rc);
 
-    info("Connection to cloud failed");
+    if (netdata_exit)
+        info("Connection to cloud terminated due to agent shutdown");
+    else {
+        errno = 0;
+        error("Connection to cloud failed");
+    }
     aclk_disconnect();
 
     aclk_lws_wss_mqtt_layer_disconect_notif();
@@ -131,6 +139,11 @@ static int _mqtt_create_connection(char *username, char *password)
         return MOSQ_ERR_UNKNOWN;
     }
 
+    // Record the session start time to allow a nominal LWT timestamp
+    usec_t now = now_realtime_usec();
+    aclk_session_sec = now / USEC_PER_SEC;
+    aclk_session_us = now % USEC_PER_SEC;
+
     _link_set_lwt("outbound/meta", 2);
 
     mosquitto_connect_callback_set(mosq, connect_callback);
@@ -259,7 +272,6 @@ int _link_set_lwt(char *sub_topic, int qos)
 {
     int rc;
     char topic[ACLK_MAX_TOPIC + 1];
-    char payload[512];
     char *final_topic;
 
     final_topic = get_topic(sub_topic, topic, ACLK_MAX_TOPIC);
@@ -269,25 +281,13 @@ int _link_set_lwt(char *sub_topic, int qos)
         return 1;
     }
 
-    usec_t time_created_offset_usec = now_realtime_usec();
-    time_t time_created = time_created_offset_usec / USEC_PER_SEC;
-    time_created_offset_usec = time_created_offset_usec % USEC_PER_SEC;
+    usec_t lwt_time = aclk_session_sec * USEC_PER_SEC + aclk_session_us + 1;
+    BUFFER *b = buffer_create(512);
+    aclk_create_header(b, "disconnect", NULL, lwt_time / USEC_PER_SEC, lwt_time % USEC_PER_SEC);
+    buffer_strcat(b, ", \"payload\": \"unexpected\" }");
+    rc = mosquitto_will_set(mosq, topic, buffer_strlen(b), buffer_tostring(b), qos, 0);
+    buffer_free(b);
 
-    char *msg_id = create_uuid();
-
-    snprintfz(
-        payload, 511,
-        "{ \"type\": \"disconnect\","
-        " \"msg-id\": \"%s\","
-        " \"timestamp\": %ld,"
-        " \"timestamp-offset-usec\": %llu,"
-        " \"version\": %d,"
-        " \"payload\": \"unexpected\" }",
-        msg_id, time_created, time_created_offset_usec, ACLK_VERSION);
-
-    freez(msg_id);
-
-    rc = mosquitto_will_set(mosq, topic, strlen(payload), (const void *) payload, qos, 0);
     return rc;
 }
 
diff --git a/backends/backends.h b/backends/backends.h
index 212823a078..efa88a7f22 100644
--- a/backends/backends.h
+++ b/backends/backends.h
@@ -27,10 +27,6 @@ typedef enum backend_types {
     BACKEND_TYPE_NUM                        // Number of backend types
 } BACKEND_TYPE;
 
-#ifdef ENABLE_EXPORTING
-#include "exporting/exporting_engine.h"
-#endif
-
 typedef int (**backend_response_checker_t)(BUFFER *);
 typedef int (**backend_request_formatter_t)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS);
 
diff --git a/backends/prometheus/backend_prometheus.c b/backends/prometheus/backend_prometheus.c
index b3f955e15f..0a7b3a3391 100644
--- a/backends/prometheus/backend_prometheus.c
+++ b/backends/prometheus/backend_prometheus.c
@@ -44,7 +44,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
     return 0;
 }
 
-static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) {
+static inline size_t backends_prometheus_name_copy(char *d, const char *s, size_t usable) {
     size_t n;
 
     for(n = 0; *s && n < usable ; d++, s++, n++) {
@@ -58,7 +58,7 @@ static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
     return n;
 }
 
-static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) {
+static inline size_t backends_prometheus_label_copy(char *d, const char *s, size_t usable) {
     size_t n;
 
     // make sure we can escape one character without overflowing the buffer
@@ -78,7 +78,7 @@ static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable
     return n;
 }
 
-static inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
+static inline char *backends_prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
     const char *sorig = s;
     char *ret = d;
     size_t n;
@@ -194,7 +194,7 @@ static int print_host_variables(RRDVAR *rv, void *data) {
             label_post = "}";
         }
 
-        prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
+        backends_prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
 
         if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
             buffer_sprintf(opts->wb
@@ -227,7 +227,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
     rrdhost_rdlock(host);
 
     char hostname[PROMETHEUS_ELEMENT_MAX + 1];
-    prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
+    backends_prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
 
     char labels[PROMETHEUS_LABELS_MAX + 1] = "";
     if(allhosts) {
@@ -299,9 +299,9 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
         char family[PROMETHEUS_ELEMENT_MAX + 1];
         char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
 
-        prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
-        prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
-        prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
 
         if(likely(backends_can_send_rrdset(backend_options, st))) {
             rrdset_rdlock(st);
@@ -317,7 +317,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
             }
             else {
                 if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE && !(output_options & BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS))
-                    prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
+                    backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
             }
 
             if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
@@ -354,7 +354,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
                             // all the dimensions of the chart, has the same algorithm, multiplier and divisor
                             // we add all dimensions as labels
 
-                            prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
 
                             if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
                                 buffer_sprintf(wb
@@ -411,7 +411,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
                             // the dimensions of the chart, do not have the same algorithm, multiplier or divisor
                             // we create a metric per dimension
 
-                            prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
 
                             if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
                                 buffer_sprintf(wb
@@ -480,7 +480,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
                             else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
                                 suffix = "_sum";
 
-                            prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
 
                             if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
                                 buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n"
@@ -593,7 +593,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
         , size_t *count_dims_skipped
 ) {
     char hostname[PROMETHEUS_ELEMENT_MAX + 1];
-    prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
+    backends_prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
 
     backends_add_host_info("netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
 
@@ -620,9 +620,9 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
         char family[PROMETHEUS_ELEMENT_MAX + 1];
         char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
 
-        prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
-        prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
-        prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
 
         if(likely(backends_can_send_rrdset(backend_options, st))) {
             rrdset_rdlock(st);
@@ -640,7 +640,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
             }
             else {
                 if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
-                    prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
+                    backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
             }
 
             // for each dimension
@@ -664,7 +664,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
                             // all the dimensions of the chart, has the same algorithm, multiplier and divisor
                             // we add all dimensions as labels
 
-                            prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
                             snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", prefix, context, suffix);
 
                             backends_add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
@@ -674,7 +674,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
                             // the dimensions of the chart, do not have the same algorithm, multiplier or divisor
                             // we create a metric per dimension
 
-                            prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
                             snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", prefix, context, dimension, suffix);
 
                             backends_add_metric(name, chart, family, NULL, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
@@ -694,7 +694,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
                             else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
                                 suffix = "_sum";
 
-                            prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
                             snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", prefix, context, units, suffix);
 
                             backends_add_metric(name, chart, family, dimension, hostname, value, last_t * MSEC_PER_SEC);
diff --git a/build_external/README.md b/build_external/README.md
index 3614af1135..d04851e28e 100644
--- a/build_external/README.md
+++ b/build_external/README.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "External build-system"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/build_external/README.md
 ---
 -->
@@ -9,7 +10,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/build_external/R
 
 This wraps the build-system in Docker so that the host system and the target system are
 decoupled. This allows:
-* Cross-compilation (e.g. linux development from MacOS)
+* Cross-compilation (e.g. linux development from macOS)
 * Cross-distro (e.g. using CentOS user-land while developing on Debian)
 * Multi-host scenarios (e.g. master/slave configurations)
 * Bleeding-edge sceneraios (e.g. using the ACLK (**currently for internal-use only**))
diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md
index 190f45f8fd..e62d9f9d00 100644
--- a/collectors/COLLECTORS.md
+++ b/collectors/COLLECTORS.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Supported collectors list"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/COLLECTORS.md
 ---
 -->
@@ -37,7 +38,7 @@ collector—we may be looking for contributions from users such as yourself!
 | [diskspace.plugin](diskspace.plugin/README.md)   | Linux   | Collects disk space usage metrics on Linux mount points.                                   |
 | [freebsd.plugin](freebsd.plugin/README.md)       | FreeBSD | Collects resource usage and performance data on FreeBSD systems.                           |
 | [idlejitter.plugin](idlejitter.plugin/README.md) | any     | Measures CPU latency and jitter on all operating systems.                                  |
-| [macos.plugin](macos.plugin/README.md)           | macos   | Collects resource usage and performance data on MacOS systems.                             |
+| [macos.plugin](macos.plugin/README.md)           | macos   | Collects resource usage and performance data on macOS systems.                             |
 | [proc.plugin](proc.plugin/README.md)             | Linux   | Collects resource usage and performance data on Linux systems.                             |
 | [slabinfo.plugin](slabinfo.plugin/README.md)     | Linux   | Collects kernel SLAB details on Linux systems.                                             |
 | [statsd.plugin](statsd.plugin/README.md)         | any     | Implements a high performance `statsd` server for Netdata.                                 |
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
index c0f82acfca..2ed6192b59 100644
--- a/collectors/apps.plugin/apps_groups.conf
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -96,7 +96,7 @@ fail2ban: fail2ban*
 # -----------------------------------------------------------------------------
 # web/ftp servers
 
-httpd: apache* httpd nginx* lighttpd
+httpd: apache* httpd nginx* lighttpd hiawatha
 proxy: squid* c-icap squidGuard varnish*
 php: php*
 ftpd: proftpd in.tftpd vsftpd
@@ -107,7 +107,7 @@ puma: *puma*
 # -----------------------------------------------------------------------------
 # database servers
 
-sql: mysqld* mariad* postgres* postmaster* oracle_* ora_*
+sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
 nosql: mongod redis* memcached *couchdb*
 timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd*
 columndb: clickhouse-server*
@@ -223,7 +223,7 @@ torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
 # -----------------------------------------------------------------------------
 # backup servers and clients
 
-backup: rsync lsyncd bacula*
+backup: rsync lsyncd bacula* borg
 
 # -----------------------------------------------------------------------------
 # cron
@@ -239,7 +239,7 @@ ups: upsmon upsd */nut/*
 # media players, servers, clients
 
 media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
-media: mpd minidlnad mt-daapd avahi* Plex*
+media: mpd minidlnad mt-daapd avahi* Plex* squeeze*
 
 # -----------------------------------------------------------------------------
 # java applications
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
index 5f40270132..31a241c169 100644
--- a/collectors/apps.plugin/apps_plugin.c
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -4110,8 +4110,6 @@ int main(int argc, char **argv) {
 
     procfile_adaptive_initial_allocation = 1;
 
-    time_t started_t = now_monotonic_sec();
-
     get_system_HZ();
 #ifdef __FreeBSD__
     time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
@@ -4212,8 +4210,5 @@ int main(int argc, char **argv) {
         show_guest_time_old = show_guest_time;
 
         debug_log("done Loop No %zu", global_iterations_counter);
-
-        // restart check (14400 seconds)
-        if(now_monotonic_sec() - started_t > 14400) exit(0);
     }
 }
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
index 3d0d3e3f7d..1320983a53 100644
--- a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
@@ -37,6 +37,16 @@ declare -A libreswan_established_add_time=()
 # we need this to avoid converting tunnel names to chart IDs on every iteration
 declare -A libreswan_tunnel_charts=()
 
+is_able_sudo_ipsec() {
+  if ! sudo -n -l "${IPSEC_CMD}" whack --status > /dev/null 2>&1; then
+    return 1
+  fi
+  if ! sudo -n -l "${IPSEC_CMD}" whack --trafficstatus > /dev/null 2>&1; then
+    return 1
+  fi
+  return 0
+}
+
 # run the ipsec command
 libreswan_ipsec() {
   if [ ${libreswan_sudo} -ne 0 ]; then
@@ -92,6 +102,11 @@ libreswan_check() {
     return 1
   fi
 
+  if [ ${libreswan_sudo} -ne 0 ] && ! is_able_sudo_ipsec; then
+    error "not enough permissions to execute ipsec with sudo. Disabling Libreswan plugin."
+    return 1
+  fi
+
   # check that we can collect data
   libreswan_get || return 1
 
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
index 6655ab2317..7e61efcd04 100644
--- a/collectors/macos.plugin/README.md
+++ b/collectors/macos.plugin/README.md
@@ -7,7 +7,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/macos
 
 # macos.plugin
 
-Collects resource usage and performance data on MacOS systems
+Collects resource usage and performance data on macOS systems
 
 By default, Netdata will enable monitoring metrics for disks, memory, and network only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though). Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics for all internal Netdata plugins.
 
diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index a2109f6ba9..1737e16b4d 100644
--- a/collectors/python.d.plugin/mysql/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
@@ -347,7 +347,7 @@ CHARTS = {
         ]
     },
     'threads_creation_rate': {
-        'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads', 'line'],
+        'options': [None, 'Threads Creation Rate', 'threads/s', 'threads', 'mysql.threads_creation_rate', 'line'],
         'lines': [
             ['Threads_created', 'created', 'incremental'],
         ]
diff --git a/configure.ac b/configure.ac
index 7347dac1d5..463a70cbbc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -433,20 +433,6 @@ fi
 AC_MSG_RESULT([${enable_https}])
 AM_CONDITIONAL([ENABLE_HTTPS], [test "${enable_https}" = "yes"])
 
-# -----------------------------------------------------------------------------
-# Exporting engine
-AC_MSG_CHECKING([if netdata exporting engine should be used])
-if test "${UV_LIBS}"; then
-    enable_exporting_engine="yes"
-    AC_DEFINE([ENABLE_EXPORTING], [1], [netdata exporting engine usability])
-    OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
-    OPTIONAL_UV_LIBS="${UV_LIBS}"
-else
-    enable_exporting_engine="no"
-fi
-AC_MSG_RESULT([${enable_exporting_engine}])
-AM_CONDITIONAL([ENABLE_EXPORTING], [test "${enable_exporting_engine}" = "yes"])
-
 # -----------------------------------------------------------------------------
 # JSON-C
 test "${enable_jsonc}" = "yes" -a -z "${JSONC_LIBS}" && \
diff --git a/daemon/common.h b/daemon/common.h
index fe799efe09..f86e61543f 100644
--- a/daemon/common.h
+++ b/daemon/common.h
@@ -50,6 +50,8 @@
 
 // backends for archiving the metrics
 #include "backends/backends.h"
+// the new exporting engine for archiving the metrics
+#include "exporting/exporting_engine.h"
 
 // the netdata API
 #include "web/api/web_api_v1.h"
diff --git a/daemon/config/README.md b/daemon/config/README.md
index 73fbf4fa5c..01913747d3 100644
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Daemon configuration"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/daemon/config/README.md
 ---
 -->
@@ -220,7 +221,7 @@ For example, the `system.io` chart has the following default settings:
 These `dim` settings produce two dimensions, `in` and `out`, both of which use the `incremental` algorithm. By
 multiplying the value of `out` by -1, Netdata creates the negative values seen in the following area chart:
 
-![The system.io chart on a MacOS
+![The system.io chart on a macOS
 laptop](https://user-images.githubusercontent.com/1153921/69286708-2cfb3900-0bb1-11ea-9fcd-dd8fbb2adf11.png)
 
 [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdaemon%2Fconfig%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/daemon/main.c b/daemon/main.c
index 20ca7d883e..e0de2c7357 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -80,9 +80,7 @@ struct netdata_static_thread static_threads[] = {
 
         // common plugins for all systems
     {"BACKENDS",             NULL,                    NULL,         1, NULL, NULL, backends_main},
-#ifdef ENABLE_EXPORTING
     {"EXPORTING",            NULL,                    NULL,         1, NULL, NULL, exporting_main},
-#endif
     {"WEB_SERVER[static1]",  NULL,                    NULL,         0, NULL, NULL, socket_listen_main_static_threaded},
     {"STREAM",               NULL,                    NULL,         0, NULL, NULL, rrdpush_sender_thread},
 
diff --git a/docs/generator/requirements.txt b/docs/generator/requirements.txt
index ac01be7aef..b68297aa6a 100644
--- a/docs/generator/requirements.txt
+++ b/docs/generator/requirements.txt
@@ -1,2 +1,2 @@
 mkdocs>=1.0.1
-mkdocs-material
+mkdocs-material==4.6.3
diff --git a/docs/netdata-security.md b/docs/netdata-security.md
index f149f2d453..821aaf3646 100644
--- a/docs/netdata-security.md
+++ b/docs/netdata-security.md
@@ -81,7 +81,7 @@ You can bind Netdata to multiple IPs and ports. If you use hostnames, Netdata wi
 
 For cloud based installations, if your cloud provider does not provide such a private LAN (or if you use multiple providers), you can create a virtual management and administration LAN with tools like `tincd` or `gvpe`. These tools create a mesh VPN allowing all servers to communicate securely and privately. Your administration stations join this mesh VPN to get access to management and administration tasks on all your cloud servers.
 
-For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your Mac). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers).
+For `gvpe` we have developed a [simple provisioning tool](https://github.com/netdata/netdata-demo-site/tree/master/gvpe) you may find handy (it includes statically compiled `gvpe` binaries for Linux and FreeBSD, and also a script to compile `gvpe` on your macOS system). We use this to create a management and administration LAN for all Netdata demo sites (spread all over the internet using multiple hosting providers).
 
 ---
 
diff --git a/docs/step-by-step/step-00.md b/docs/step-by-step/step-00.md
index f01dcb6532..2ff5cced58 100644
--- a/docs/step-by-step/step-00.md
+++ b/docs/step-by-step/step-00.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "The step-by-step Netdata tutorial"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/step-by-step/step-00.md
 ---
 -->
@@ -21,7 +22,7 @@ If you have monitoring experience, or would rather get straight into configuring
 straight into code and configurations with our [getting started guide](../getting-started.md).
 
 > This tutorial contains instructions for Netdata installed on a Linux system. Many of the instructions will work on
-> other supported operating systems, like FreeBSD and MacOS, but we can't make any guarantees.
+> other supported operating systems, like FreeBSD and macOS, but we can't make any guarantees.
 
 ## Where to go if you need help
 
diff --git a/docs/step-by-step/step-04.md b/docs/step-by-step/step-04.md
index d403e208d7..8b63be34cd 100644
--- a/docs/step-by-step/step-04.md
+++ b/docs/step-by-step/step-04.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Step 4. The basics of configuring Netdata"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/step-by-step/step-04.md
 ---
 -->
@@ -61,7 +62,7 @@ an example file to your Netdata config directory and then allow you to edit it b
 > change permanent](https://stackoverflow.com/questions/13046624/how-to-permanently-export-a-variable-in-linux).
 
 Let's give it a shot. Navigate to your Netdata config directory. To use `edit-config` on `netdata.conf`, you need to
-have permissions to edit the file. On Linux/MacOS systems, you can usually use `sudo` to elevate your permissions.
+have permissions to edit the file. On Linux/macOS systems, you can usually use `sudo` to elevate your permissions.
 
 ```bash
 cd /etc/netdata   # Replace this path with your Netdata config directory, if different as found in the steps above
diff --git a/docs/tutorials/collect-unbound-metrics.md b/docs/tutorials/collect-unbound-metrics.md
index 4fc7242d68..410bda84ce 100644
--- a/docs/tutorials/collect-unbound-metrics.md
+++ b/docs/tutorials/collect-unbound-metrics.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Monitor Unbound DNS servers with Netdata"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/tutorials/collect-unbound-metrics.md
 ---
 -->
@@ -11,7 +12,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/tutorials/c
 Labs. In v1.19 of Netdata, we release a completely refactored collector for collecting real-time metrics from Unbound
 servers and displaying them in Netdata dashboards.
 
-Unbound runs on FreeBSD, OpenBSD, NetBSD, MacOS, Linux, and Windows, and supports DNS-over-TLS, which ensures that DNS
+Unbound runs on FreeBSD, OpenBSD, NetBSD, macOS, Linux, and Windows, and supports DNS-over-TLS, which ensures that DNS
 queries and answers are all encrypted with TLS. In theory, that should reduce the risk of eavesdropping or
 man-in-the-middle attacks when communicating to DNS servers.
 
diff --git a/docs/what-is-netdata.md b/docs/what-is-netdata.md
index 0504efa1d1..2a30124ef6 100644
--- a/docs/what-is-netdata.md
+++ b/docs/what-is-netdata.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "What is Netdata?"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/what-is-netdata.md
 ---
 -->
@@ -19,7 +20,7 @@ Netdata provides **unparalleled insights**, **in real-time**, of everything happ
 
 _Netdata is **fast** and **efficient**, designed to permanently run on all systems (**physical** & **virtual** servers, **containers**, **IoT** devices), without disrupting their core function._
 
-Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **MacOS**.
+Netdata is **free, open-source software** and it currently runs on **Linux**, **FreeBSD**, and **macOS**.
 
 ---
 
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/exporting/aws_kinesis/aws_kinesis.c
index 4b0d5f74a4..938569a9eb 100644
--- a/exporting/aws_kinesis/aws_kinesis.c
+++ b/exporting/aws_kinesis/aws_kinesis.c
@@ -75,9 +75,23 @@ void aws_kinesis_connector_worker(void *instance_p)
         uv_mutex_lock(&instance->mutex);
         uv_cond_wait(&instance->cond_var, &instance->mutex);
 
+        // reset the monitoring chart counters
+        stats->received_bytes =
+        stats->sent_bytes =
+        stats->sent_metrics =
+        stats->lost_metrics =
+        stats->receptions =
+        stats->transmission_successes =
+        stats->transmission_failures =
+        stats->data_lost_events =
+        stats->lost_bytes =
+        stats->reconnects = 0;
+
         BUFFER *buffer = (BUFFER *)instance->buffer;
         size_t buffer_len = buffer_strlen(buffer);
 
+        stats->buffered_bytes = buffer_len;
+
         size_t sent = 0;
 
         while (sent < buffer_len) {
@@ -115,7 +129,7 @@ void aws_kinesis_connector_worker(void *instance_p)
                 connector_specific_data, connector_specific_config->stream_name, partition_key, first_char, record_len);
 
             sent += record_len;
-            stats->chart_transmission_successes++;
+            stats->transmission_successes++;
 
             size_t sent_bytes = 0, lost_bytes = 0;
 
@@ -127,30 +141,34 @@ void aws_kinesis_connector_worker(void *instance_p)
                     "EXPORTING: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
                     instance->config.destination, sent_bytes, sent_bytes - lost_bytes);
 
-                stats->chart_transmission_failures++;
-                stats->chart_data_lost_events++;
-                stats->chart_lost_bytes += lost_bytes;
+                stats->transmission_failures++;
+                stats->data_lost_events++;
+                stats->lost_bytes += lost_bytes;
 
                 // estimate the number of lost metrics
-                stats->chart_lost_metrics += (collected_number)(
-                    stats->chart_buffered_metrics *
+                stats->lost_metrics += (collected_number)(
+                    stats->buffered_metrics *
                     (buffer_len && (lost_bytes > buffer_len) ? (double)lost_bytes / buffer_len : 1));
 
                 break;
             } else {
-                stats->chart_receptions++;
+                stats->receptions++;
             }
 
             if (unlikely(netdata_exit))
                 break;
         }
 
-        stats->chart_sent_bytes += sent;
+        stats->sent_bytes += sent;
         if (likely(sent == buffer_len))
-            stats->chart_sent_metrics = stats->chart_buffered_metrics;
+            stats->sent_metrics = stats->buffered_metrics;
 
         buffer_flush(buffer);
 
+        send_internal_metrics(instance);
+
+        stats->buffered_metrics = 0;
+
         uv_mutex_unlock(&instance->mutex);
 
 #ifdef UNIT_TESTING
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
index 0a38d66bbe..93347328cd 100644
--- a/exporting/exporting_engine.c
+++ b/exporting/exporting_engine.c
@@ -35,6 +35,11 @@ void *exporting_main(void *ptr)
         goto cleanup;
     }
 
+    RRDSET *st_main_rusage = NULL;
+    RRDDIM *rd_main_user = NULL;
+    RRDDIM *rd_main_system = NULL;
+    create_main_rusage_chart(&st_main_rusage, &rd_main_user, &rd_main_system);
+
     usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC;
     heartbeat_t hb;
     heartbeat_init(&hb);
@@ -55,10 +60,7 @@ void *exporting_main(void *ptr)
             break;
         }
 
-        if (send_internal_metrics(engine) != 0) {
-            error("EXPORTING: cannot send metrics for the operation of exporting engine");
-            break;
-        }
+        send_main_rusage(st_main_rusage, rd_main_user, rd_main_system);
 
 #ifdef UNIT_TESTING
         break;
diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h
index 1a3a3ecd09..94daf98e05 100644
--- a/exporting/exporting_engine.h
+++ b/exporting/exporting_engine.h
@@ -14,10 +14,10 @@
 extern struct config exporting_config;
 
 #define EXPORTING_UPDATE_EVERY_OPTION_NAME "update every"
-#define EXPORTING_UPDATE_EVERY_DEFAULT     10
+#define EXPORTING_UPDATE_EVERY_DEFAULT 10
 
 typedef enum exporting_options {
-    EXPORTING_OPTION_NONE                   = 0,
+    EXPORTING_OPTION_NON                    = 0,
 
     EXPORTING_SOURCE_DATA_AS_COLLECTED      = (1 << 0),
     EXPORTING_SOURCE_DATA_AVERAGE           = (1 << 1),
@@ -42,10 +42,22 @@ typedef enum exporting_options {
      (instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS &&                                             \
       label->label_source != LABEL_SOURCE_NETDATA_CONF))
 
+typedef enum exporting_connector_types {
+    EXPORTING_CONNECTOR_TYPE_UNKNOWN,                 // Invalid type
+    EXPORTING_CONNECTOR_TYPE_GRAPHITE,                // Send plain text to Graphite
+    EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET,   // Send data to OpenTSDB using telnet API
+    EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP,     // Send data to OpenTSDB using HTTP API
+    EXPORTING_CONNECTOR_TYPE_JSON,                    // Send data in JSON format
+    EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // Send data using the Prometheus remote write protocol
+    EXPORTING_CONNECTOR_TYPE_KINESIS,                 // Send message to AWS Kinesis
+    EXPORTING_CONNECTOR_TYPE_MONGODB,                 // Send data to MongoDB collection
+    EXPORTING_CONNECTOR_TYPE_NUM                      // Number of connector types
+} EXPORTING_CONNECTOR_TYPE;
+
 struct engine;
 
 struct instance_config {
-    BACKEND_TYPE type;
+    EXPORTING_CONNECTOR_TYPE type;
 
     const char *name;
     const char *destination;
@@ -87,18 +99,42 @@ struct engine_config {
 };
 
 struct stats {
-    collected_number chart_buffered_metrics;
-    collected_number chart_lost_metrics;
-    collected_number chart_sent_metrics;
-    collected_number chart_buffered_bytes;
-    collected_number chart_received_bytes;
-    collected_number chart_sent_bytes;
-    collected_number chart_receptions;
-    collected_number chart_transmission_successes;
-    collected_number chart_transmission_failures;
-    collected_number chart_data_lost_events;
-    collected_number chart_lost_bytes;
-    collected_number chart_reconnects;
+    collected_number buffered_metrics;
+    collected_number lost_metrics;
+    collected_number sent_metrics;
+    collected_number buffered_bytes;
+    collected_number lost_bytes;
+    collected_number sent_bytes;
+    collected_number received_bytes;
+    collected_number transmission_successes;
+    collected_number data_lost_events;
+    collected_number reconnects;
+    collected_number transmission_failures;
+    collected_number receptions;
+
+    int initialized;
+
+    RRDSET *st_metrics;
+    RRDDIM *rd_buffered_metrics;
+    RRDDIM *rd_lost_metrics;
+    RRDDIM *rd_sent_metrics;
+
+    RRDSET *st_bytes;
+    RRDDIM *rd_buffered_bytes;
+    RRDDIM *rd_lost_bytes;
+    RRDDIM *rd_sent_bytes;
+    RRDDIM *rd_received_bytes;
+
+    RRDSET *st_ops;
+    RRDDIM *rd_transmission_successes;
+    RRDDIM *rd_data_lost_events;
+    RRDDIM *rd_reconnects;
+    RRDDIM *rd_transmission_failures;
+    RRDDIM *rd_receptions;
+
+    RRDSET *st_rusage;
+    RRDDIM *rd_user;
+    RRDDIM *rd_system;
 };
 
 struct instance {
@@ -150,10 +186,12 @@ struct engine {
     struct instance *instance_root;
 };
 
+extern struct instance *prometheus_exporter_instance;
+
 void *exporting_main(void *ptr);
 
 struct engine *read_exporting_config();
-BACKEND_TYPE exporting_select_type(const char *type);
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type);
 
 int init_connectors(struct engine *engine);
 
@@ -179,12 +217,17 @@ int end_chart_formatting(struct engine *engine, RRDSET *st);
 int end_host_formatting(struct engine *engine, RRDHOST *host);
 int end_batch_formatting(struct engine *engine);
 int flush_host_labels(struct instance *instance, RRDHOST *host);
+int simple_connector_update_buffered_bytes(struct instance *instance);
 
 int exporting_discard_response(BUFFER *buffer, struct instance *instance);
 void simple_connector_receive_response(int *sock, struct instance *instance);
 void simple_connector_send_buffer(int *sock, int *failures, struct instance *instance);
 void simple_connector_worker(void *instance_p);
 
-int send_internal_metrics(struct engine *engine);
+void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+void send_internal_metrics(struct instance *instance);
+
+#include "exporting/prometheus/prometheus.h"
 
 #endif /* NETDATA_EXPORTING_ENGINE_H */
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
index f815bff89d..d3f928583c 100644
--- a/exporting/graphite/graphite.c
+++ b/exporting/graphite/graphite.c
@@ -27,7 +27,7 @@ int init_graphite_instance(struct instance *instance)
 
     instance->end_chart_formatting = NULL;
     instance->end_host_formatting = flush_host_labels;
-    instance->end_batch_formatting = NULL;
+    instance->end_batch_formatting = simple_connector_update_buffered_bytes;
 
     instance->send_header = NULL;
     instance->check_response = exporting_discard_response;
diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c
index 798101fd9c..0db0ca1354 100644
--- a/exporting/init_connectors.c
+++ b/exporting/init_connectors.c
@@ -32,35 +32,35 @@ int init_connectors(struct engine *engine)
         instance->after = engine->now;
 
         switch (instance->config.type) {
-            case BACKEND_TYPE_GRAPHITE:
+            case EXPORTING_CONNECTOR_TYPE_GRAPHITE:
                 if (init_graphite_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_JSON:
+            case EXPORTING_CONNECTOR_TYPE_JSON:
                 if (init_json_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_OPENTSDB_USING_TELNET:
+            case EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET:
                 if (init_opentsdb_telnet_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_OPENTSDB_USING_HTTP:
+            case EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP:
                 if (init_opentsdb_http_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE:
+            case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
 #if ENABLE_PROMETHEUS_REMOTE_WRITE
                 if (init_prometheus_remote_write_instance(instance) != 0)
                     return 1;
 #endif
                 break;
-            case BACKEND_TYPE_KINESIS:
+            case EXPORTING_CONNECTOR_TYPE_KINESIS:
 #if HAVE_KINESIS
                 if (init_aws_kinesis_instance(instance) != 0)
                     return 1;
 #endif
                 break;
-            case BACKEND_TYPE_MONGODB:
+            case EXPORTING_CONNECTOR_TYPE_MONGODB:
 #if HAVE_MONGOC
                 if (init_mongodb_instance(instance) != 0)
                     return 1;
@@ -77,7 +77,7 @@ int init_connectors(struct engine *engine)
             error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
             return 1;
         }
-        char threadname[NETDATA_THREAD_NAME_MAX+1];
+        char threadname[NETDATA_THREAD_NAME_MAX + 1];
         snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
         uv_thread_set_name_np(instance->thread, threadname);
     }
diff --git a/exporting/json/json.c b/exporting/json/json.c
index 9886b55567..b334804cff 100644
--- a/exporting/json/json.c
+++ b/exporting/json/json.c
@@ -27,7 +27,7 @@ int init_json_instance(struct instance *instance)
 
     instance->end_chart_formatting = NULL;
     instance->end_host_formatting = flush_host_labels;
-    instance->end_batch_formatting = NULL;
+    instance->end_batch_formatting = simple_connector_update_buffered_bytes;
 
     instance->send_header = NULL;
     instance->check_response = exporting_discard_response;
diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c
index b10a8fa664..f20c4f1c80 100644
--- a/exporting/mongodb/mongodb.c
+++ b/exporting/mongodb/mongodb.c
@@ -183,8 +183,10 @@ int format_batch_mongodb(struct instance *instance)
         // ring buffer is full, reuse the oldest element
         connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
         free_bson(insert, connector_specific_data->last_buffer->documents_inserted);
+        connector_specific_data->total_documents_inserted -= connector_specific_data->last_buffer->documents_inserted;
+        stats->buffered_bytes -= connector_specific_data->last_buffer->buffered_bytes;
     }
-    insert = callocz((size_t)stats->chart_buffered_metrics, sizeof(bson_t *));
+    insert = callocz((size_t)stats->buffered_metrics, sizeof(bson_t *));
     connector_specific_data->last_buffer->insert = insert;
 
     BUFFER *buffer = (BUFFER *)instance->buffer;
@@ -193,7 +195,7 @@ int format_batch_mongodb(struct instance *instance)
 
     size_t documents_inserted = 0;
 
-    while (*end && documents_inserted <= (size_t)stats->chart_buffered_metrics) {
+    while (*end && documents_inserted <= (size_t)stats->buffered_metrics) {
         while (*end && *end != '\n')
             end++;
 
@@ -208,7 +210,8 @@ int format_batch_mongodb(struct instance *instance)
         insert[documents_inserted] = bson_new_from_json((const uint8_t *)start, -1, &bson_error);
 
         if (unlikely(!insert[documents_inserted])) {
-            error("EXPORTING: %s", bson_error.message);
+            error(
+                "EXPORTING: Failed creating a BSON document from a JSON string \"%s\" : %s", start, bson_error.message);
             free_bson(insert, documents_inserted);
             return 1;
         }
@@ -218,8 +221,16 @@ int format_batch_mongodb(struct instance *instance)
         documents_inserted++;
     }
 
+    stats->buffered_bytes += connector_specific_data->last_buffer->buffered_bytes = buffer_strlen(buffer);
+
     buffer_flush(buffer);
 
+    // stats->buffered_metrics counts the metrics added during the current iteration of
+    // MongoDB batch formatting, so clear it here. The worker reports statistics using
+    // connector_specific_data->total_documents_inserted instead.
+    stats->buffered_metrics = 0;
+    connector_specific_data->total_documents_inserted += documents_inserted;
+
     connector_specific_data->last_buffer->documents_inserted = documents_inserted;
     connector_specific_data->last_buffer = connector_specific_data->last_buffer->next;
 
@@ -246,11 +257,25 @@ void mongodb_connector_worker(void *instance_p)
         uv_mutex_lock(&instance->mutex);
         uv_cond_wait(&instance->cond_var, &instance->mutex);
 
+        // reset the monitoring chart counters
+        stats->received_bytes =
+        stats->sent_bytes =
+        stats->sent_metrics =
+        stats->lost_metrics =
+        stats->receptions =
+        stats->transmission_successes =
+        stats->transmission_failures =
+        stats->data_lost_events =
+        stats->lost_bytes =
+        stats->reconnects = 0;
+
         bson_t **insert = connector_specific_data->first_buffer->insert;
         size_t documents_inserted = connector_specific_data->first_buffer->documents_inserted;
+        size_t buffered_bytes = connector_specific_data->first_buffer->buffered_bytes;
 
         connector_specific_data->first_buffer->insert = NULL;
         connector_specific_data->first_buffer->documents_inserted = 0;
+        connector_specific_data->first_buffer->buffered_bytes = 0;
         connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
 
         uv_mutex_unlock(&instance->mutex);
@@ -279,9 +304,10 @@ void mongodb_connector_worker(void *instance_p)
                 NULL,
                 NULL,
                 &bson_error))) {
-            stats->chart_sent_bytes += data_size;
-            stats->chart_transmission_successes++;
-            stats->chart_receptions++;
+            stats->sent_metrics = documents_inserted;
+            stats->sent_bytes += data_size;
+            stats->transmission_successes++;
+            stats->receptions++;
         } else {
             // oops! we couldn't send (all or some of the) data
             error("EXPORTING: %s", bson_error.message);
@@ -290,10 +316,10 @@ void mongodb_connector_worker(void *instance_p)
                 "Willing to write %zu bytes, wrote %zu bytes.",
                 instance->config.destination, data_size, 0UL);
 
-            stats->chart_transmission_failures++;
-            stats->chart_data_lost_events++;
-            stats->chart_lost_bytes += data_size;
-            stats->chart_lost_metrics += stats->chart_buffered_metrics;
+            stats->transmission_failures++;
+            stats->data_lost_events++;
+            stats->lost_bytes += buffered_bytes;
+            stats->lost_metrics += documents_inserted;
         }
 
         free_bson(insert, documents_inserted);
@@ -301,8 +327,18 @@ void mongodb_connector_worker(void *instance_p)
         if (unlikely(netdata_exit))
             break;
 
-        stats->chart_sent_bytes += data_size;
-        stats->chart_sent_metrics = stats->chart_buffered_metrics;
+        uv_mutex_lock(&instance->mutex);
+
+        stats->buffered_metrics = connector_specific_data->total_documents_inserted;
+
+        send_internal_metrics(instance);
+
+        connector_specific_data->total_documents_inserted -= documents_inserted;
+
+        stats->buffered_metrics = 0;
+        stats->buffered_bytes -= buffered_bytes;
+
+        uv_mutex_unlock(&instance->mutex);
 
 #ifdef UNIT_TESTING
         break;
diff --git a/exporting/mongodb/mongodb.h b/exporting/mongodb/mongodb.h
index 0f23705f55..5116e66fab 100644
--- a/exporting/mongodb/mongodb.h
+++ b/exporting/mongodb/mongodb.h
@@ -10,6 +10,7 @@
 struct bson_buffer {
     bson_t **insert;
     size_t documents_inserted;
+    size_t buffered_bytes;
 
     struct bson_buffer *next;
 };
@@ -18,6 +19,8 @@ struct mongodb_specific_data {
     mongoc_client_t *client;
     mongoc_collection_t *collection;
 
+    size_t total_documents_inserted;
+
     bson_t **current_insert;
     struct bson_buffer *first_buffer;
     struct bson_buffer *last_buffer;
diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c
index 54f3c3c04d..2d5b2db698 100644
--- a/exporting/opentsdb/opentsdb.c
+++ b/exporting/opentsdb/opentsdb.c
@@ -27,7 +27,7 @@ int init_opentsdb_telnet_instance(struct instance *instance)
 
     instance->end_chart_formatting = NULL;
     instance->end_host_formatting = flush_host_labels;
-    instance->end_batch_formatting = NULL;
+    instance->end_batch_formatting = simple_connector_update_buffered_bytes;
 
     instance->send_header = NULL;
     instance->check_response = exporting_discard_response;
@@ -68,7 +68,7 @@ int init_opentsdb_http_instance(struct instance *instance)
 
     instance->end_chart_formatting = NULL;
     instance->end_host_formatting = flush_host_labels;
-    instance->end_batch_formatting = NULL;
+    instance->end_batch_formatting = simple_connector_update_buffered_bytes;
 
     instance->send_header = NULL;
     instance->check_response = exporting_discard_response;
diff --git a/exporting/process_data.c b/exporting/process_data.c
index c902aabdda..f2442e701c 100644
--- a/exporting/process_data.c
+++ b/exporting/process_data.c
@@ -206,7 +206,7 @@ int start_host_formatting(struct engine *engine, RRDHOST *host)
  * Start chart formatting for every connector instance's buffer
  *
  * @param engine an engine data structure.
- * @param a chart.
+ * @param st a chart.
  * @return Returns 0 on success, 1 on failure.
  */
 int start_chart_formatting(struct engine *engine, RRDSET *st)
@@ -242,7 +242,7 @@ int metric_formatting(struct engine *engine, RRDDIM *rd)
                 error("EXPORTING: cannot format metric for %s", instance->config.name);
                 return 1;
             }
-            instance->stats.chart_buffered_metrics++;
+            instance->stats.buffered_metrics++;
         }
     }
 
@@ -389,6 +389,19 @@ int flush_host_labels(struct instance *instance, RRDHOST *host)
     return 0;
 }
 
+/**
+ * Update stats for buffered bytes
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int simple_connector_update_buffered_bytes(struct instance *instance)
+{
+    instance->stats.buffered_bytes = (collected_number)buffer_strlen((BUFFER *)(instance->buffer));
+
+    return 0;
+}
+
 /**
  * Notify workers
  *
diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c
index 7e91a0c962..f25ceed9d5 100644
--- a/exporting/prometheus/prometheus.c
+++ b/exporting/prometheus/prometheus.c
@@ -7,10 +7,16 @@
 // PROMETHEUS
 // /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
 
+/**
+ * Check if a chart can be sent to an external database
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @return Returns 1 if the chart can be sent, 0 otherwise.
+ */
 inline int can_send_rrdset(struct instance *instance, RRDSET *st)
 {
     RRDHOST *host = st->rrdhost;
-    (void)host;
 
     if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
         return 0;
@@ -24,7 +30,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
             rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
             debug(
                 D_BACKEND,
-                "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.",
+                "EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.",
                 st->id,
                 host->hostname);
             return 0;
@@ -34,7 +40,7 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
     if (unlikely(!rrdset_is_available_for_backends(st))) {
         debug(
             D_BACKEND,
-            "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.",
+            "EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.",
             st->id,
             host->hostname);
         return 0;
@@ -42,10 +48,10 @@ inline int can_send_rrdset(struct instance *instance, RRDSET *st)
 
     if (unlikely(
             st->rrd_memory_mode == RRD_MEMORY_MODE_NONE &&
-            !(BACKEND_OPTIONS_DATA_SOURCE(instance->config.options) == BACKEND_SOURCE_DATA_AS_COLLECTED))) {
+            !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
         debug(
             D_BACKEND,
-            "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.",
+            "EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting connector requires database access.",
             st->id,
             host->hostname,
             rrd_memory_mode_name(host->rrd_memory_mode));
@@ -63,8 +69,19 @@ static struct prometheus_server {
     struct prometheus_server *next;
 } *prometheus_server_root = NULL;
 
+/**
+ * Get the last time when a Prometheus server scraped the Netdata Prometheus exporter.
+ *
+ * @param server the name of the Prometheus server.
+ * @param host a data collecting host.
+ * @param now the current time.
+ * @return Returns the last time when the server accessed Netdata, or 0 if it is the first occurrence.
+ */
 static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now)
 {
+#ifdef UNIT_TESTING
+    return 0;
+#endif
     static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
 
     uint32_t hash = simple_hash(server);
@@ -93,6 +110,14 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
     return 0;
 }
 
+/**
+ * Copy and sanitize name.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
 inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
 {
     size_t n;
@@ -110,6 +135,14 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
     return n;
 }
 
+/**
+ * Copy and sanitize label.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
 inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
 {
     size_t n;
@@ -131,6 +164,15 @@ inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
     return n;
 }
 
+/**
+ * Copy and sanitize units.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @param showoldunits set this flag to 1 to show old (before v1.12) units.
+ * @return Returns the destination string.
+ */
 inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits)
 {
     const char *sorig = s;
@@ -203,6 +245,43 @@ inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int sh
     return ret;
 }
 
+/**
+ * Format host labels for the Prometheus exporter
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ */
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
+{
+    if (unlikely(!sending_labels_configured(instance)))
+        return;
+
+    if (!instance->labels)
+        instance->labels = buffer_create(1024);
+
+    int count = 0;
+    rrdhost_check_rdlock(host);
+    netdata_rwlock_rdlock(&host->labels_rwlock);
+    for (struct label *label = host->labels; label; label = label->next) {
+        if (!should_send_label(instance, label))
+            continue;
+
+        char key[PROMETHEUS_ELEMENT_MAX + 1];
+        char value[PROMETHEUS_ELEMENT_MAX + 1];
+
+        prometheus_name_copy(key, label->key, PROMETHEUS_ELEMENT_MAX);
+        prometheus_label_copy(value, label->value, PROMETHEUS_ELEMENT_MAX);
+
+        if (*key && *value) {
+            if (count > 0)
+                buffer_strcat(instance->labels, ",");
+            buffer_sprintf(instance->labels, "%s=\"%s\"", key, value);
+            count++;
+        }
+    }
+    netdata_rwlock_unlock(&host->labels_rwlock);
+}
+
 struct host_variables_callback_options {
     RRDHOST *host;
     BUFFER *wb;
@@ -215,6 +294,13 @@ struct host_variables_callback_options {
     char name[PROMETHEUS_VARIABLE_MAX + 1];
 };
 
+/**
+ * Print host variables.
+ *
+ * @param rv a variable.
+ * @param data callback options.
+ * @return Returns 0 when iteration should continue.
+ */
 static int print_host_variables(RRDVAR *rv, void *data)
 {
     struct host_variables_callback_options *opts = data;
@@ -274,14 +360,23 @@ static int print_host_variables(RRDVAR *rv, void *data)
     return 0;
 }
 
+/**
+ * Write metrics in Prometheus format to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param wb the buffer to fill with metrics.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param allhosts set to 1 if host instance should be in the output for tags.
+ * @param output_options options to configure the format of the output.
+ */
 static void rrd_stats_api_v1_charts_allmetrics_prometheus(
     struct instance *instance,
     RRDHOST *host,
     BUFFER *wb,
     const char *prefix,
     EXPORTING_OPTIONS exporting_options,
-    time_t after,
-    time_t before,
     int allhosts,
     PROMETHEUS_OUTPUT_OPTIONS output_options)
 {
@@ -290,31 +385,33 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
     char hostname[PROMETHEUS_ELEMENT_MAX + 1];
     prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
 
+    format_host_labels_prometheus(instance, host);
+
+    if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+        buffer_sprintf(
+            wb,
+            "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
+            hostname,
+            host->program_name,
+            host->program_version,
+            now_realtime_usec() / USEC_PER_MS);
+    else
+        buffer_sprintf(
+            wb,
+            "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
+            hostname,
+            host->program_name,
+            host->program_version);
+
     char labels[PROMETHEUS_LABELS_MAX + 1] = "";
     if (allhosts) {
-        if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
-            buffer_sprintf(
-                wb,
-                "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
-                hostname,
-                host->program_name,
-                host->program_version,
-                now_realtime_usec() / USEC_PER_MS);
-        else
-            buffer_sprintf(
-                wb,
-                "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
-                hostname,
-                host->program_name,
-                host->program_version);
-
-        if (host->tags && *(host->tags)) {
+        if (instance->labels && buffer_tostring(instance->labels)) {
             if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
                 buffer_sprintf(
                     wb,
                     "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n",
                     hostname,
-                    host->tags,
+                    buffer_tostring(instance->labels),
                     now_realtime_usec() / USEC_PER_MS);
 
                 // deprecated, exists only for compatibility with older queries
@@ -322,50 +419,46 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
                     wb,
                     "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n",
                     hostname,
-                    host->tags,
+                    buffer_tostring(instance->labels),
                     now_realtime_usec() / USEC_PER_MS);
             } else {
-                buffer_sprintf(wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, host->tags);
+                buffer_sprintf(
+                    wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
 
                 // deprecated, exists only for compatibility with older queries
-                buffer_sprintf(wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, host->tags);
+                buffer_sprintf(
+                    wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
             }
         }
 
         snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
     } else {
-        if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
-            buffer_sprintf(
-                wb,
-                "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
-                hostname,
-                host->program_name,
-                host->program_version,
-                now_realtime_usec() / USEC_PER_MS);
-        else
-            buffer_sprintf(
-                wb,
-                "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
-                hostname,
-                host->program_name,
-                host->program_version);
-
-        if (host->tags && *(host->tags)) {
+        if (instance->labels && buffer_tostring(instance->labels)) {
             if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
                 buffer_sprintf(
-                    wb, "netdata_host_tags_info{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
+                    wb,
+                    "netdata_host_tags_info{%s} 1 %llu\n",
+                    buffer_tostring(instance->labels),
+                    now_realtime_usec() / USEC_PER_MS);
 
                 // deprecated, exists only for compatibility with older queries
-                buffer_sprintf(wb, "netdata_host_tags{%s} 1 %llu\n", host->tags, now_realtime_usec() / USEC_PER_MS);
+                buffer_sprintf(
+                    wb,
+                    "netdata_host_tags{%s} 1 %llu\n",
+                    buffer_tostring(instance->labels),
+                    now_realtime_usec() / USEC_PER_MS);
             } else {
-                buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", host->tags);
+                buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", buffer_tostring(instance->labels));
 
                 // deprecated, exists only for compatibility with older queries
-                buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", host->tags);
+                buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", buffer_tostring(instance->labels));
             }
         }
     }
 
+    if (instance->labels)
+        buffer_flush(instance->labels);
+
     // send custom variables set for the host
     if (output_options & PROMETHEUS_OUTPUT_VARIABLES) {
         struct host_variables_callback_options opts = { .host = host,
@@ -383,20 +476,20 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
     RRDSET *st;
     rrdset_foreach_read(st, host)
     {
-        char chart[PROMETHEUS_ELEMENT_MAX + 1];
-        char context[PROMETHEUS_ELEMENT_MAX + 1];
-        char family[PROMETHEUS_ELEMENT_MAX + 1];
-
-        prometheus_label_copy(
-            chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id, PROMETHEUS_ELEMENT_MAX);
-        prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
-        prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
 
         if (likely(can_send_rrdset(instance, st))) {
             rrdset_rdlock(st);
 
+            char chart[PROMETHEUS_ELEMENT_MAX + 1];
+            char context[PROMETHEUS_ELEMENT_MAX + 1];
+            char family[PROMETHEUS_ELEMENT_MAX + 1];
             char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
 
+            prometheus_label_copy(
+                chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id, PROMETHEUS_ELEMENT_MAX);
+            prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+            prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
             int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
             int homogeneous = 1;
             if (as_collected) {
@@ -433,7 +526,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
                     if (as_collected) {
                         // we need as-collected / raw data
 
-                        if (unlikely(rd->last_collected_time.tv_sec < after))
+                        if (unlikely(rd->last_collected_time.tv_sec < instance->after))
                             continue;
 
                         const char *t = "gauge", *h = "gives";
@@ -562,8 +655,9 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
                     } else {
                         // we need average or sum of the data
 
-                        time_t first_t = after, last_t = before;
-                        calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+                        time_t first_time = instance->after;
+                        time_t last_time = instance->before;
+                        calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_time);
 
                         if (!isnan(value) && !isinf(value)) {
                             if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE)
@@ -586,8 +680,8 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
                                     suffix,
                                     (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
                                     st->units,
-                                    (unsigned long long)first_t,
-                                    (unsigned long long)last_t);
+                                    (unsigned long long)first_time,
+                                    (unsigned long long)last_time);
 
                             if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
                                 buffer_sprintf(wb, "# COMMENT TYPE %s_%s%s%s gauge\n", prefix, context, units, suffix);
@@ -606,7 +700,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
                                     dimension,
                                     labels,
                                     value,
-                                    last_t * MSEC_PER_SEC);
+                                    last_time * MSEC_PER_SEC);
                             else
                                 buffer_sprintf(
                                     wb,
@@ -633,6 +727,18 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
     rrdhost_unlock(host);
 }
 
+/**
+ * Get the last time when a server accessed Netdata. Write information about an API request to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param exporting_options options to configure what data is exported.
+ * @param server the name of a Prometheus server.
+ * @param now the current time.
+ * @param output_options options to configure the format of the output.
+ * @return Returns the last time when the server accessed Netdata.
+ */
 static inline time_t prometheus_preparation(
     struct instance *instance,
     RRDHOST *host,
@@ -649,13 +755,13 @@ static inline time_t prometheus_preparation(
 
     int first_seen = 0;
     if (!after) {
-        after = now - instance->engine->config.update_every;
+        after = now - instance->config.update_every;
         first_seen = 1;
     }
 
     if (after > now) {
         // oops! this should never happen
-        after = now - instance->engine->config.update_every;
+        after = now - instance->config.update_every;
     }
 
     if (output_options & PROMETHEUS_OUTPUT_HELP) {
@@ -685,8 +791,17 @@ static inline time_t prometheus_preparation(
     return after;
 }
 
+/**
+ * Write metrics and auxiliary information for one host to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
 void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
-    struct instance *instance,
     RRDHOST *host,
     BUFFER *wb,
     const char *server,
@@ -694,17 +809,36 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
     EXPORTING_OPTIONS exporting_options,
     PROMETHEUS_OUTPUT_OPTIONS output_options)
 {
-    time_t before = now_realtime_sec();
+    if (unlikely(!prometheus_exporter_instance))
+        return;
+
+    prometheus_exporter_instance->before = now_realtime_sec();
 
     // we start at the point we had stopped before
-    time_t after = prometheus_preparation(instance, host, wb, exporting_options, server, before, output_options);
+    prometheus_exporter_instance->after = prometheus_preparation(
+        prometheus_exporter_instance,
+        host,
+        wb,
+        exporting_options,
+        server,
+        prometheus_exporter_instance->before,
+        output_options);
 
     rrd_stats_api_v1_charts_allmetrics_prometheus(
-        instance, host, wb, prefix, exporting_options, after, before, 0, output_options);
+        prometheus_exporter_instance, host, wb, prefix, exporting_options, 0, output_options);
 }
 
+/**
+ * Write metrics and auxiliary information for all hosts to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
 void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
-    struct instance *instance,
     RRDHOST *host,
     BUFFER *wb,
     const char *server,
@@ -712,16 +846,26 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
     EXPORTING_OPTIONS exporting_options,
     PROMETHEUS_OUTPUT_OPTIONS output_options)
 {
-    time_t before = now_realtime_sec();
+    if (unlikely(!prometheus_exporter_instance))
+        return;
+
+    prometheus_exporter_instance->before = now_realtime_sec();
 
     // we start at the point we had stopped before
-    time_t after = prometheus_preparation(instance, host, wb, exporting_options, server, before, output_options);
+    prometheus_exporter_instance->after = prometheus_preparation(
+        prometheus_exporter_instance,
+        host,
+        wb,
+        exporting_options,
+        server,
+        prometheus_exporter_instance->before,
+        output_options);
 
     rrd_rdlock();
     rrdhost_foreach_read(host)
     {
         rrd_stats_api_v1_charts_allmetrics_prometheus(
-            instance, host, wb, prefix, exporting_options, after, before, 1, output_options);
+            prometheus_exporter_instance, host, wb, prefix, exporting_options, 1, output_options);
     }
     rrd_unlock();
 }
diff --git a/exporting/prometheus/prometheus.h b/exporting/prometheus/prometheus.h
index b947633deb..85bcc7a7f8 100644
--- a/exporting/prometheus/prometheus.h
+++ b/exporting/prometheus/prometheus.h
@@ -23,10 +23,10 @@ typedef enum prometheus_output_flags {
 } PROMETHEUS_OUTPUT_OPTIONS;
 
 extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
-    struct instance *instance, RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
+    RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
     EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
 extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
-    struct instance *instance, RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
+    RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
     EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
 
 int can_send_rrdset(struct instance *instance, RRDSET *st);
@@ -34,4 +34,6 @@ size_t prometheus_name_copy(char *d, const char *s, size_t usable);
 size_t prometheus_label_copy(char *d, const char *s, size_t usable);
 char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits);
 
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host);
+
 #endif //NETDATA_EXPORTING_PROMETHEUS_H
diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c
index 248a053c21..12019e2286 100644
--- a/exporting/prometheus/remote_write/remote_write.c
+++ b/exporting/prometheus/remote_write/remote_write.c
@@ -314,7 +314,7 @@ int format_batch_prometheus_remote_write(struct instance *instance)
         return 1;
     }
     buffer->len = data_size;
-    instance->stats.chart_buffered_bytes = (collected_number)buffer_strlen(buffer);
+    instance->stats.buffered_bytes = (collected_number)buffer_strlen(buffer);
 
     return 0;
 }
diff --git a/exporting/read_config.c b/exporting/read_config.c
index a9590659e0..122ceef79d 100644
--- a/exporting/read_config.c
+++ b/exporting/read_config.c
@@ -2,18 +2,19 @@
 
 #include "exporting_engine.h"
 
-struct config exporting_config = {.first_section = NULL,
-                                  .last_section = NULL,
-                                  .mutex = NETDATA_MUTEX_INITIALIZER,
-                                  .index = {.avl_tree = {.root = NULL, .compar = appconfig_section_compare},
-                                            .rwlock = AVL_LOCK_INITIALIZER}};
+struct config exporting_config = { .first_section = NULL,
+                                   .last_section = NULL,
+                                   .mutex = NETDATA_MUTEX_INITIALIZER,
+                                   .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+                                              .rwlock = AVL_LOCK_INITIALIZER } };
 
+struct instance *prometheus_exporter_instance = NULL;
 
 static _CONNECTOR_INSTANCE *find_instance(const char *section)
 {
     _CONNECTOR_INSTANCE *local_ci;
 
-    local_ci = add_connector_instance(NULL, NULL);  // Get root section
+    local_ci = add_connector_instance(NULL, NULL); // Get root section
     if (unlikely(!local_ci))
         return local_ci;
 
@@ -38,12 +39,10 @@ char *expconfig_get(struct config *root, const char *section, const char *name,
     local_ci = find_instance(section);
 
     if (!local_ci)
-        return NULL;    // TODO: Check if it is meaningful to return default_value
+        return NULL; // TODO: Check if it is meaningful to return default_value
 
     return appconfig_get(
-        root,
-        local_ci->instance_name,
-        name,
+        root, local_ci->instance_name, name,
         appconfig_get(
             root, local_ci->connector_name, name, appconfig_get(root, CONFIG_SECTION_EXPORTING, name, default_value)));
 }
@@ -58,16 +57,12 @@ int expconfig_get_boolean(struct config *root, const char *section, const char *
     local_ci = find_instance(section);
 
     if (!local_ci)
-        return 0;       // TODO: Check if it is meaningful to return default_value
+        return 0; // TODO: Check if it is meaningful to return default_value
 
     return appconfig_get_boolean(
-        root,
-        local_ci->instance_name,
-        name,
+        root, local_ci->instance_name, name,
         appconfig_get_boolean(
-            root,
-            local_ci->connector_name,
-            name,
+            root, local_ci->connector_name, name,
             appconfig_get_boolean(root, CONFIG_SECTION_EXPORTING, name, default_value)));
 }
 
@@ -81,16 +76,12 @@ long long expconfig_get_number(struct config *root, const char *section, const c
     local_ci = find_instance(section);
 
     if (!local_ci)
-        return 0;   // TODO: Check if it is meaningful to return default_value
+        return 0; // TODO: Check if it is meaningful to return default_value
 
     return appconfig_get_number(
-        root,
-        local_ci->instance_name,
-        name,
+        root, local_ci->instance_name, name,
         appconfig_get_number(
-            root,
-            local_ci->connector_name,
-            name,
+            root, local_ci->connector_name, name,
             appconfig_get_number(root, CONFIG_SECTION_EXPORTING, name, default_value)));
 }
 
@@ -108,7 +99,7 @@ int get_connector_instance(struct connector_instance *target_ci)
     static _CONNECTOR_INSTANCE *local_ci = NULL;
     _CONNECTOR_INSTANCE *global_connector_instance;
 
-    global_connector_instance = find_instance(NULL);       // Fetch head of instances
+    global_connector_instance = find_instance(NULL); // Fetch head of instances
 
     if (unlikely(!global_connector_instance))
         return 0;
@@ -140,40 +131,39 @@ int get_connector_instance(struct connector_instance *target_ci)
  *
  * @return It returns the connector id.
  */
-BACKEND_TYPE exporting_select_type(const char *type)
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type)
 {
     if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
-        return BACKEND_TYPE_GRAPHITE;
+        return EXPORTING_CONNECTOR_TYPE_GRAPHITE;
     } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
-        return BACKEND_TYPE_OPENTSDB_USING_TELNET;
+        return EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET;
     } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
-        return BACKEND_TYPE_OPENTSDB_USING_HTTP;
+        return EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP;
     } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
-        return BACKEND_TYPE_JSON;
+        return EXPORTING_CONNECTOR_TYPE_JSON;
     } else if (!strcmp(type, "prometheus_remote_write")) {
-        return BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE;
+        return EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE;
     } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
-        return BACKEND_TYPE_KINESIS;
+        return EXPORTING_CONNECTOR_TYPE_KINESIS;
     } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext"))
-        return BACKEND_TYPE_MONGODB;
+        return EXPORTING_CONNECTOR_TYPE_MONGODB;
 
-    return BACKEND_TYPE_UNKNOWN;
+    return EXPORTING_CONNECTOR_TYPE_UNKNOWN;
 }
 
-EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options) {
-    if(!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") || !strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
+EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options)
+{
+    if (!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") ||
+        !strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
         exporting_options |= EXPORTING_SOURCE_DATA_AS_COLLECTED;
         exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AS_COLLECTED);
-    }
-    else if(!strcmp(data_source, "average")) {
+    } else if (!strcmp(data_source, "average")) {
         exporting_options |= EXPORTING_SOURCE_DATA_AVERAGE;
         exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AVERAGE);
-    }
-    else if(!strcmp(data_source, "sum") || !strcmp(data_source, "volume")) {
+    } else if (!strcmp(data_source, "sum") || !strcmp(data_source, "volume")) {
         exporting_options |= EXPORTING_SOURCE_DATA_SUM;
         exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_SUM);
-    }
-    else {
+    } else {
         error("EXPORTING: invalid data data_source method '%s'.", data_source);
     }
 
@@ -195,7 +185,7 @@ struct engine *read_exporting_config()
     static struct engine *engine = NULL;
     struct connector_instance_list {
         struct connector_instance local_ci;
-        BACKEND_TYPE backend_type;
+        EXPORTING_CONNECTOR_TYPE backend_type;
 
         struct connector_instance_list *next;
     };
@@ -220,6 +210,46 @@ struct engine *read_exporting_config()
 
     freez(filename);
 
+#define prometheus_config_get(name, value)                                                                             \
+    appconfig_get(                                                                                                     \
+        &exporting_config, CONFIG_SECTION_PROMETHEUS, name,                                                            \
+        appconfig_get(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+#define prometheus_config_get_number(name, value)                                                                      \
+    appconfig_get_number(                                                                                              \
+        &exporting_config, CONFIG_SECTION_PROMETHEUS, name,                                                            \
+        appconfig_get_number(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+#define prometheus_config_get_boolean(name, value)                                                                     \
+    appconfig_get_boolean(                                                                                             \
+        &exporting_config, CONFIG_SECTION_PROMETHEUS, name,                                                            \
+        appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+
+    if (!prometheus_exporter_instance) {
+        prometheus_exporter_instance = callocz(1, sizeof(struct instance));
+
+        prometheus_exporter_instance->config.update_every =
+            prometheus_config_get_number(EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+
+        if (prometheus_config_get_boolean("send names instead of ids", CONFIG_BOOLEAN_YES))
+            prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
+        else
+            prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
+
+        if (prometheus_config_get_boolean("send configured labels", CONFIG_BOOLEAN_YES))
+            prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+        else
+            prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+
+        if (prometheus_config_get_boolean("send automatic labels", CONFIG_BOOLEAN_NO))
+            prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+        else
+            prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+        prometheus_exporter_instance->config.charts_pattern =
+            simple_pattern_create(prometheus_config_get("send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+        prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create(
+            prometheus_config_get("send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+    }
+
     // TODO: change BACKEND to EXPORTING
     while (get_connector_instance(&local_ci)) {
         info("Processing connector instance (%s)", local_ci.instance_name);
@@ -227,8 +257,7 @@ struct engine *read_exporting_config()
         if (exporter_get_boolean(local_ci.instance_name, "enabled", 0)) {
             info(
                 "Instance (%s) on connector (%s) is enabled and scheduled for activation",
-                local_ci.instance_name,
-                local_ci.connector_name);
+                local_ci.instance_name, local_ci.connector_name);
 
             tmp_ci_list = (struct connector_instance_list *)callocz(1, sizeof(struct connector_instance_list));
             memcpy(&tmp_ci_list->local_ci, &local_ci, sizeof(local_ci));
@@ -252,8 +281,8 @@ struct engine *read_exporting_config()
         engine->config.hostname =
             strdupz(exporter_get(CONFIG_SECTION_EXPORTING, "hostname", netdata_configured_hostname));
         engine->config.prefix = strdupz(exporter_get(CONFIG_SECTION_EXPORTING, "prefix", "netdata"));
-        engine->config.update_every =
-            exporter_get_number(CONFIG_SECTION_EXPORTING, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+        engine->config.update_every = exporter_get_number(
+            CONFIG_SECTION_EXPORTING, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
     }
 
     while (tmp_ci_list) {
@@ -262,27 +291,27 @@ struct engine *read_exporting_config()
 
         info("Instance %s on %s", tmp_ci_list->local_ci.instance_name, tmp_ci_list->local_ci.connector_name);
 
-        if (tmp_ci_list->backend_type == BACKEND_TYPE_UNKNOWN) {
+        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) {
             error("Unknown exporting connector type");
             goto next_connector_instance;
         }
 
 #ifndef ENABLE_PROMETHEUS_REMOTE_WRITE
-        if (tmp_ci_list->backend_type == BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE) {
+        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
             error("Prometheus Remote Write support isn't compiled");
             goto next_connector_instance;
         }
 #endif
 
 #ifndef HAVE_KINESIS
-        if (tmp_ci_list->backend_type == BACKEND_TYPE_KINESIS) {
+        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
             error("AWS Kinesis support isn't compiled");
             goto next_connector_instance;
         }
 #endif
 
 #ifndef HAVE_MONGOC
-        if (tmp_ci_list->backend_type == BACKEND_TYPE_MONGODB) {
+        if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
             error("MongoDB support isn't compiled");
             goto next_connector_instance;
         }
@@ -299,41 +328,31 @@ struct engine *read_exporting_config()
 
         tmp_instance->config.name = strdupz(tmp_ci_list->local_ci.instance_name);
 
-        tmp_instance->config.destination =
-            strdupz(exporter_get(instance_name, "destination", "localhost"));
+        tmp_instance->config.destination = strdupz(exporter_get(instance_name, "destination", "localhost"));
 
         tmp_instance->config.update_every =
             exporter_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
 
-        tmp_instance->config.buffer_on_failures =
-            exporter_get_number(instance_name, "buffer on failures", 10);
+        tmp_instance->config.buffer_on_failures = exporter_get_number(instance_name, "buffer on failures", 10);
 
-        tmp_instance->config.timeoutms =
-            exporter_get_number(instance_name, "timeout ms", 10000);
+        tmp_instance->config.timeoutms = exporter_get_number(instance_name, "timeout ms", 10000);
 
-        tmp_instance->config.charts_pattern = simple_pattern_create(
-            exporter_get(instance_name, "send charts matching", "*"),
-            NULL,
-            SIMPLE_PATTERN_EXACT);
+        tmp_instance->config.charts_pattern =
+            simple_pattern_create(exporter_get(instance_name, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
 
         tmp_instance->config.hosts_pattern = simple_pattern_create(
-            exporter_get(instance_name, "send hosts matching", "localhost *"),
-            NULL,
-            SIMPLE_PATTERN_EXACT);
+            exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
 
-        char *data_source =
-            exporter_get(instance_name, "data source", "average");
+        char *data_source = exporter_get(instance_name, "data source", "average");
 
         tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options);
 
-        if (exporter_get_boolean(
-                instance_name, "send configured labels", CONFIG_BOOLEAN_YES))
+        if (exporter_get_boolean(instance_name, "send configured labels", CONFIG_BOOLEAN_YES))
             tmp_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
         else
             tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
 
-        if (exporter_get_boolean(
-                instance_name, "send automatic labels", CONFIG_BOOLEAN_NO))
+        if (exporter_get_boolean(instance_name, "send automatic labels", CONFIG_BOOLEAN_NO))
             tmp_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
         else
             tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
@@ -343,33 +362,30 @@ struct engine *read_exporting_config()
         else
             tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
 
-        if (tmp_instance->config.type == BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE) {
+        if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
             struct prometheus_remote_write_specific_config *connector_specific_config =
                 callocz(1, sizeof(struct prometheus_remote_write_specific_config));
 
             tmp_instance->config.connector_specific_config = connector_specific_config;
 
-            connector_specific_config->remote_write_path = strdupz(exporter_get(
-                instance_name, "remote write URL path", "/receive"));
+            connector_specific_config->remote_write_path =
+                strdupz(exporter_get(instance_name, "remote write URL path", "/receive"));
         }
 
-        if (tmp_instance->config.type == BACKEND_TYPE_KINESIS) {
+        if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
             struct aws_kinesis_specific_config *connector_specific_config =
                 callocz(1, sizeof(struct aws_kinesis_specific_config));
 
             tmp_instance->config.connector_specific_config = connector_specific_config;
 
-            connector_specific_config->stream_name = strdupz(exporter_get(
-                instance_name, "stream name", "netdata"));
+            connector_specific_config->stream_name = strdupz(exporter_get(instance_name, "stream name", "netdata"));
 
-            connector_specific_config->auth_key_id = strdupz(exporter_get(
-                instance_name, "aws_access_key_id", ""));
+            connector_specific_config->auth_key_id = strdupz(exporter_get(instance_name, "aws_access_key_id", ""));
 
-            connector_specific_config->secure_key = strdupz(exporter_get(
-                instance_name, "aws_secret_access_key", ""));
+            connector_specific_config->secure_key = strdupz(exporter_get(instance_name, "aws_secret_access_key", ""));
         }
 
-        if (tmp_instance->config.type == BACKEND_TYPE_MONGODB) {
+        if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
             struct mongodb_specific_config *connector_specific_config =
                 callocz(1, sizeof(struct mongodb_specific_config));
 
@@ -393,14 +409,13 @@ struct engine *read_exporting_config()
 #endif
 
         if (unlikely(!exporting_config_exists) && !engine->config.hostname) {
-            engine->config.hostname =
-                strdupz(config_get(instance_name, "hostname", netdata_configured_hostname));
+            engine->config.hostname = strdupz(config_get(instance_name, "hostname", netdata_configured_hostname));
             engine->config.prefix = strdupz(config_get(instance_name, "prefix", "netdata"));
             engine->config.update_every =
                 config_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
         }
 
-next_connector_instance:
+    next_connector_instance:
         tmp_ci_list1 = tmp_ci_list->next;
         freez(tmp_ci_list);
         tmp_ci_list = tmp_ci_list1;
diff --git a/exporting/send_data.c b/exporting/send_data.c
index 6315a7d1bf..8875065f2b 100644
--- a/exporting/send_data.c
+++ b/exporting/send_data.c
@@ -57,8 +57,8 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
         if (likely(r > 0)) {
             // we received some data
             response->len += r;
-            stats->chart_received_bytes += r;
-            stats->chart_receptions++;
+            stats->received_bytes += r;
+            stats->receptions++;
         } else if (r == 0) {
             error("EXPORTING: '%s' closed the socket", instance->config.destination);
             close(*sock);
@@ -109,9 +109,9 @@ void simple_connector_send_buffer(int *sock, int *failures, struct instance *ins
 
     if(written != -1 && (size_t)written == len) {
         // we sent the data successfully
-        stats->chart_transmission_successes++;
-        stats->chart_sent_bytes += written;
-        stats->chart_sent_metrics = stats->chart_buffered_metrics;
+        stats->transmission_successes++;
+        stats->sent_bytes += written;
+        stats->sent_metrics = stats->buffered_metrics;
 
         // reset the failures count
         *failures = 0;
@@ -126,10 +126,10 @@ void simple_connector_send_buffer(int *sock, int *failures, struct instance *ins
             instance->config.destination,
             len,
             written);
-        stats->chart_transmission_failures++;
+        stats->transmission_failures++;
 
         if(written != -1)
-            stats->chart_sent_bytes += written;
+            stats->sent_bytes += written;
 
         // increment the counter we check for data loss
         (*failures)++;
@@ -160,6 +160,19 @@ void simple_connector_worker(void *instance_p)
     int failures = 0;
 
     while(!netdata_exit) {
+
+        // reset the monitoring chart counters
+        stats->received_bytes =
+        stats->sent_bytes =
+        stats->sent_metrics =
+        stats->lost_metrics =
+        stats->receptions =
+        stats->transmission_successes =
+        stats->transmission_failures =
+        stats->data_lost_events =
+        stats->lost_bytes =
+        stats->reconnects = 0;
+
         // ------------------------------------------------------------------------
         // if we are connected, receive a response, without blocking
 
@@ -179,7 +192,7 @@ void simple_connector_worker(void *instance_p)
                 &reconnects,
                 NULL,
                 0);
-            stats->chart_reconnects += reconnects;
+            stats->reconnects += reconnects;
         }
 
         if(unlikely(netdata_exit)) break;
@@ -194,12 +207,31 @@ void simple_connector_worker(void *instance_p)
             simple_connector_send_buffer(&sock, &failures, instance);
         } else {
             error("EXPORTING: failed to update '%s'", instance->config.destination);
-            stats->chart_transmission_failures++;
+            stats->transmission_failures++;
 
             // increment the counter we check for data loss
             failures++;
         }
 
+        BUFFER *buffer = instance->buffer;
+
+        if (failures > instance->config.buffer_on_failures) {
+            stats->lost_bytes += buffer_strlen(buffer);
+            error(
+                "EXPORTING: connector instance %s reached %d exporting failures. "
+                "Flushing buffers to protect this host - this results in data loss on server '%s'",
+                instance->config.name, failures, instance->config.destination);
+            buffer_flush(buffer);
+            failures = 0;
+            stats->data_lost_events++;
+            stats->lost_metrics = stats->buffered_metrics;
+        }
+
+        send_internal_metrics(instance);
+
+        if(likely(buffer_strlen(buffer) == 0))
+            stats->buffered_metrics = 0;
+
         uv_mutex_unlock(&instance->mutex);
 
 #ifdef UNIT_TESTING
diff --git a/exporting/send_internal_metrics.c b/exporting/send_internal_metrics.c
index b93918695b..e4111a587b 100644
--- a/exporting/send_internal_metrics.c
+++ b/exporting/send_internal_metrics.c
@@ -3,16 +3,170 @@
 #include "exporting_engine.h"
 
 /**
- * Send internal metrics
+ * Create a chart for the main exporting thread CPU usage
+ *
+ * @param st_rusage the thread CPU usage chart
+ * @param rd_user a dimension for user CPU usage
+ * @param rd_system a dimension for system CPU usage
+ */
+void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
+{
+    if (*st_rusage && *rd_user && *rd_system)
+        return;
+
+    *st_rusage = rrdset_create_localhost(
+        "netdata", "exporting_main_thread_cpu", NULL, "exporting", NULL, "Netdata Main Exporting Thread CPU Usage",
+        "milliseconds/s", "exporting", NULL, 130600, localhost->rrd_update_every, RRDSET_TYPE_STACKED);
+
+    *rd_user = rrddim_add(*st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+    *rd_system = rrddim_add(*st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+}
+
+/**
+ * Send the main exporting thread CPU usage
+ *
+ * @param st_rusage a thread CPU usage chart
+ * @param rd_user a dimension for user CPU usage
+ * @param rd_system a dimension for system CPU usage
+ */
+void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
+{
+    struct rusage thread;
+    getrusage(RUSAGE_THREAD, &thread);
+
+    if (likely(st_rusage->counter_done))
+        rrdset_next(st_rusage);
+
+    rrddim_set_by_pointer(st_rusage, rd_user,   thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+    rrddim_set_by_pointer(st_rusage, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+
+    rrdset_done(st_rusage);
+}
+
+/**
+ * Send internal metrics for an instance
  *
  * Send performance metrics for the operation of exporting engine itself to the Netdata database.
  *
- * @param engine an engine data structure.
- * @return Returns 0 on success, 1 on failure.
+ * @param instance an instance data structure.
  */
-int send_internal_metrics(struct engine *engine)
+void send_internal_metrics(struct instance *instance)
 {
-    (void)engine;
+    struct stats *stats = &instance->stats;
 
-    return 0;
+    // ------------------------------------------------------------------------
+    // create charts for monitoring the exporting operations
+
+    if (!stats->initialized) {
+        char id[RRD_ID_LENGTH_MAX + 1];
+        BUFFER *family = buffer_create(0);
+
+        buffer_sprintf(family, "exporting_%s", instance->config.name);
+
+        snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_metrics", instance->config.name);
+        netdata_fix_chart_id(id);
+
+        stats->st_metrics = rrdset_create_localhost(
+            "netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Buffered Metrics", "metrics", "exporting", NULL,
+            130610, instance->config.update_every, RRDSET_TYPE_LINE);
+
+        stats->rd_buffered_metrics = rrddim_add(stats->st_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_lost_metrics     = rrddim_add(stats->st_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_sent_metrics     = rrddim_add(stats->st_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+        // ------------------------------------------------------------------------
+
+        snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_bytes", instance->config.name);
+        netdata_fix_chart_id(id);
+
+        stats->st_bytes = rrdset_create_localhost(
+            "netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Exporting Data Size", "KiB", "exporting", NULL,
+            130620, instance->config.update_every, RRDSET_TYPE_AREA);
+
+        stats->rd_buffered_bytes = rrddim_add(stats->st_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_lost_bytes     = rrddim_add(stats->st_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_sent_bytes     = rrddim_add(stats->st_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_received_bytes = rrddim_add(stats->st_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+
+        // ------------------------------------------------------------------------
+
+        snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_ops", instance->config.name);
+        netdata_fix_chart_id(id);
+
+        stats->st_ops = rrdset_create_localhost(
+            "netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Exporting Operations", "operations", "exporting",
+            NULL, 130630, instance->config.update_every, RRDSET_TYPE_LINE);
+
+        stats->rd_transmission_successes = rrddim_add(stats->st_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_data_lost_events       = rrddim_add(stats->st_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_reconnects             = rrddim_add(stats->st_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_transmission_failures  = rrddim_add(stats->st_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        stats->rd_receptions             = rrddim_add(stats->st_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+        // ------------------------------------------------------------------------
+
+        snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_thread_cpu", instance->config.name);
+        netdata_fix_chart_id(id);
+
+        stats->st_rusage = rrdset_create_localhost(
+            "netdata", id, NULL, buffer_tostring(family), NULL, "Netdata Exporting Instance Thread CPU Usage",
+            "milliseconds/s", "exporting", NULL, 130640, instance->config.update_every, RRDSET_TYPE_STACKED);
+
+        stats->rd_user   = rrddim_add(stats->st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+        stats->rd_system = rrddim_add(stats->st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+
+        buffer_free(family);
+
+        stats->initialized = 1;
+    }
+
+    // ------------------------------------------------------------------------
+    // update the monitoring charts
+
+    if (likely(stats->st_metrics->counter_done))
+        rrdset_next(stats->st_metrics);
+
+    rrddim_set_by_pointer(stats->st_metrics, stats->rd_buffered_metrics, stats->buffered_metrics);
+    rrddim_set_by_pointer(stats->st_metrics, stats->rd_lost_metrics,     stats->lost_metrics);
+    rrddim_set_by_pointer(stats->st_metrics, stats->rd_sent_metrics,     stats->sent_metrics);
+
+    rrdset_done(stats->st_metrics);
+
+    // ------------------------------------------------------------------------
+
+    if (likely(stats->st_bytes->counter_done))
+        rrdset_next(stats->st_bytes);
+
+    rrddim_set_by_pointer(stats->st_bytes, stats->rd_buffered_bytes, stats->buffered_bytes);
+    rrddim_set_by_pointer(stats->st_bytes, stats->rd_lost_bytes,     stats->lost_bytes);
+    rrddim_set_by_pointer(stats->st_bytes, stats->rd_sent_bytes,     stats->sent_bytes);
+    rrddim_set_by_pointer(stats->st_bytes, stats->rd_received_bytes, stats->received_bytes);
+
+    rrdset_done(stats->st_bytes);
+
+    // ------------------------------------------------------------------------
+
+    if (likely(stats->st_ops->counter_done))
+        rrdset_next(stats->st_ops);
+
+    rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_successes, stats->transmission_successes);
+    rrddim_set_by_pointer(stats->st_ops, stats->rd_data_lost_events,       stats->data_lost_events);
+    rrddim_set_by_pointer(stats->st_ops, stats->rd_reconnects,             stats->reconnects);
+    rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_failures,  stats->transmission_failures);
+    rrddim_set_by_pointer(stats->st_ops, stats->rd_receptions,             stats->receptions);
+
+    rrdset_done(stats->st_ops);
+
+    // ------------------------------------------------------------------------
+
+    struct rusage thread;
+    getrusage(RUSAGE_THREAD, &thread);
+
+    if (likely(stats->st_rusage->counter_done))
+        rrdset_next(stats->st_rusage);
+
+    rrddim_set_by_pointer(stats->st_rusage, stats->rd_user,   thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+    rrddim_set_by_pointer(stats->st_rusage, stats->rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+
+    rrdset_done(stats->st_rusage);
 }
diff --git a/exporting/tests/exporting_doubles.c b/exporting/tests/exporting_doubles.c
index 0d3158d46f..29935a7d19 100644
--- a/exporting/tests/exporting_doubles.c
+++ b/exporting/tests/exporting_doubles.c
@@ -20,7 +20,7 @@ struct engine *__mock_read_exporting_config()
     engine->instance_root = calloc(1, sizeof(struct instance));
     struct instance *instance = engine->instance_root;
     instance->engine = engine;
-    instance->config.type = BACKEND_TYPE_GRAPHITE;
+    instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE;
     instance->config.name = strdupz("instance_name");
     instance->config.destination = strdupz("localhost");
     instance->config.update_every = 1;
@@ -82,10 +82,26 @@ int __wrap_notify_workers(struct engine *engine)
     return mock_type(int);
 }
 
-int __wrap_send_internal_metrics(struct engine *engine)
+void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
 {
     function_called();
-    check_expected_ptr(engine);
+    check_expected_ptr(st_rusage);
+    check_expected_ptr(rd_user);
+    check_expected_ptr(rd_system);
+}
+
+void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
+{
+    function_called();
+    check_expected_ptr(st_rusage);
+    check_expected_ptr(rd_user);
+    check_expected_ptr(rd_system);
+}
+
+int __wrap_send_internal_metrics(struct instance *instance)
+{
+    function_called();
+    check_expected_ptr(instance);
     return mock_type(int);
 }
 
diff --git a/exporting/tests/exporting_fixtures.c b/exporting/tests/exporting_fixtures.c
index 026cf579ab..ebd4e360e0 100644
--- a/exporting/tests/exporting_fixtures.c
+++ b/exporting/tests/exporting_fixtures.c
@@ -127,3 +127,35 @@ int teardown_initialized_engine(void **state)
 
     return 0;
 }
+
+int setup_prometheus(void **state)
+{
+    (void)state;
+
+    prometheus_exporter_instance = calloc(1, sizeof(struct instance));
+
+    setup_rrdhost();
+
+    prometheus_exporter_instance->config.update_every = 10;
+
+    prometheus_exporter_instance->config.options |=
+        EXPORTING_OPTION_SEND_NAMES | EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+    prometheus_exporter_instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+    prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+
+    return 0;
+}
+
+int teardown_prometheus(void **state)
+{
+    (void)state;
+
+    teardown_rrdhost();
+
+    simple_pattern_free(prometheus_exporter_instance->config.charts_pattern);
+    simple_pattern_free(prometheus_exporter_instance->config.hosts_pattern);
+    free(prometheus_exporter_instance);
+
+    return 0;
+}
diff --git a/exporting/tests/netdata_doubles.c b/exporting/tests/netdata_doubles.c
index 41a5f4c2f7..f4da7769f6 100644
--- a/exporting/tests/netdata_doubles.c
+++ b/exporting/tests/netdata_doubles.c
@@ -85,6 +85,92 @@ void __rrd_check_rdlock(const char *file, const char *function, const unsigned l
     (void)line;
 }
 
+RRDSET *rrdset_create_custom(
+    RRDHOST *host,
+    const char *type,
+    const char *id,
+    const char *name,
+    const char *family,
+    const char *context,
+    const char *title,
+    const char *units,
+    const char *plugin,
+    const char *module,
+    long priority,
+    int update_every,
+    RRDSET_TYPE chart_type,
+    RRD_MEMORY_MODE memory_mode,
+    long history_entries)
+{
+    check_expected_ptr(host);
+    check_expected_ptr(type);
+    check_expected_ptr(id);
+    check_expected_ptr(name);
+    check_expected_ptr(family);
+    check_expected_ptr(context);
+    UNUSED(title);
+    check_expected_ptr(units);
+    check_expected_ptr(plugin);
+    check_expected_ptr(module);
+    check_expected(priority);
+    check_expected(update_every);
+    check_expected(chart_type);
+    UNUSED(memory_mode);
+    UNUSED(history_entries);
+
+    function_called();
+
+    return mock_ptr_type(RRDSET *);
+}
+
+void rrdset_next_usec(RRDSET *st, usec_t microseconds)
+{
+    check_expected_ptr(st);
+    UNUSED(microseconds);
+
+    function_called();
+}
+
+void rrdset_done(RRDSET *st)
+{
+    check_expected_ptr(st);
+
+    function_called();
+}
+
+RRDDIM *rrddim_add_custom(
+    RRDSET *st,
+    const char *id,
+    const char *name,
+    collected_number multiplier,
+    collected_number divisor,
+    RRD_ALGORITHM algorithm,
+    RRD_MEMORY_MODE memory_mode)
+{
+    check_expected_ptr(st);
+    UNUSED(id);
+    check_expected_ptr(name);
+    check_expected(multiplier);
+    check_expected(divisor);
+    check_expected(algorithm);
+    UNUSED(memory_mode);
+
+    function_called();
+
+    return NULL;
+}
+
+collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
+{
+    check_expected_ptr(st);
+    UNUSED(rd);
+    UNUSED(value);
+
+    function_called();
+
+    return 0;
+}
+
 const char *rrd_memory_mode_name(RRD_MEMORY_MODE id)
 {
     (void)id;
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
index 06d8878af5..09715ddbc5 100644
--- a/exporting/tests/test_exporting_engine.c
+++ b/exporting/tests/test_exporting_engine.c
@@ -44,6 +44,11 @@ static void test_exporting_engine(void **state)
     expect_memory(__wrap_init_connectors, engine, engine, sizeof(struct engine));
     will_return(__wrap_init_connectors, 0);
 
+    expect_function_call(__wrap_create_main_rusage_chart);
+    expect_not_value(__wrap_create_main_rusage_chart, st_rusage, NULL);
+    expect_not_value(__wrap_create_main_rusage_chart, rd_user, NULL);
+    expect_not_value(__wrap_create_main_rusage_chart, rd_system, NULL);
+
     expect_function_call(__wrap_now_realtime_sec);
     will_return(__wrap_now_realtime_sec, 2);
 
@@ -59,9 +64,10 @@ static void test_exporting_engine(void **state)
     expect_memory(__wrap_notify_workers, engine, engine, sizeof(struct engine));
     will_return(__wrap_notify_workers, 0);
 
-    expect_function_call(__wrap_send_internal_metrics);
-    expect_memory(__wrap_send_internal_metrics, engine, engine, sizeof(struct engine));
-    will_return(__wrap_send_internal_metrics, 0);
+    expect_function_call(__wrap_send_main_rusage);
+    expect_value(__wrap_send_main_rusage, st_rusage, NULL);
+    expect_value(__wrap_send_main_rusage, rd_user, NULL);
+    expect_value(__wrap_send_main_rusage, rd_system, NULL);
 
     expect_function_call(__wrap_info_int);
 
@@ -87,7 +93,7 @@ static void test_read_exporting_config(void **state)
     assert_ptr_not_equal(instance, NULL);
     assert_ptr_equal(instance->next, NULL);
     assert_ptr_equal(instance->engine, engine);
-    assert_int_equal(instance->config.type, BACKEND_TYPE_GRAPHITE);
+    assert_int_equal(instance->config.type, EXPORTING_CONNECTOR_TYPE_GRAPHITE);
     assert_string_equal(instance->config.destination, "localhost");
     assert_int_equal(instance->config.update_every, 1);
     assert_int_equal(instance->config.buffer_on_failures, 10);
@@ -122,7 +128,7 @@ static void test_init_connectors(void **state)
     assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
     assert_ptr_equal(instance->end_chart_formatting, NULL);
     assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
-    assert_ptr_equal(instance->end_batch_formatting, NULL);
+    assert_ptr_equal(instance->end_batch_formatting, simple_connector_update_buffered_bytes);
 
     BUFFER *buffer = instance->buffer;
     assert_ptr_not_equal(buffer, NULL);
@@ -384,7 +390,7 @@ static void test_prepare_buffers(void **state)
 
     assert_int_equal(__real_prepare_buffers(engine), 0);
 
-    assert_int_equal(instance->stats.chart_buffered_metrics, 1);
+    assert_int_equal(instance->stats.buffered_metrics, 1);
 
     // check with NULL functions
     instance->start_batch_formatting = NULL;
@@ -573,8 +579,8 @@ static void test_simple_connector_receive_response(void **state)
         log_line,
         "EXPORTING: received 9 bytes from instance_name connector instance. Ignoring them. Sample: 'Test recv'");
 
-    assert_int_equal(stats->chart_received_bytes, 9);
-    assert_int_equal(stats->chart_receptions, 1);
+    assert_int_equal(stats->received_bytes, 9);
+    assert_int_equal(stats->receptions, 1);
     assert_int_equal(sock, 1);
 }
 
@@ -614,10 +620,10 @@ static void test_simple_connector_send_buffer(void **state)
     simple_connector_send_buffer(&sock, &failures, instance);
 
     assert_int_equal(failures, 0);
-    assert_int_equal(stats->chart_transmission_successes, 1);
-    assert_int_equal(stats->chart_sent_bytes, 84);
-    assert_int_equal(stats->chart_sent_metrics, 1);
-    assert_int_equal(stats->chart_transmission_failures, 0);
+    assert_int_equal(stats->transmission_successes, 1);
+    assert_int_equal(stats->sent_bytes, 84);
+    assert_int_equal(stats->sent_metrics, 1);
+    assert_int_equal(stats->transmission_failures, 0);
 
     assert_int_equal(buffer_strlen(buffer), 0);
 
@@ -628,6 +634,7 @@ static void test_simple_connector_worker(void **state)
 {
     struct engine *engine = *state;
     struct instance *instance = engine->instance_root;
+    struct stats *stats = &instance->stats;
     BUFFER *buffer = instance->buffer;
 
     __real_mark_scheduled_instances(engine);
@@ -661,7 +668,24 @@ static void test_simple_connector_worker(void **state)
     expect_value(__wrap_send, len, 84);
     expect_value(__wrap_send, flags, MSG_NOSIGNAL);
 
+    expect_function_call(__wrap_send_internal_metrics);
+    expect_value(__wrap_send_internal_metrics, instance, instance);
+    will_return(__wrap_send_internal_metrics, 0);
+
     simple_connector_worker(instance);
+
+    assert_int_equal(stats->buffered_metrics, 0);
+    assert_int_equal(stats->buffered_bytes, 84);
+    assert_int_equal(stats->received_bytes, 0);
+    assert_int_equal(stats->sent_bytes, 84);
+    assert_int_equal(stats->sent_metrics, 1);
+    assert_int_equal(stats->lost_metrics, 0);
+    assert_int_equal(stats->receptions, 0);
+    assert_int_equal(stats->transmission_successes, 1);
+    assert_int_equal(stats->transmission_failures, 0);
+    assert_int_equal(stats->data_lost_events, 0);
+    assert_int_equal(stats->lost_bytes, 0);
+    assert_int_equal(stats->reconnects, 0);
 }
 
 static void test_sanitize_json_string(void **state)
@@ -761,6 +785,367 @@ static void test_flush_host_labels(void **state)
     assert_int_equal(buffer_strlen(instance->labels), 0);
 }
 
+static void test_create_main_rusage_chart(void **state)
+{
+    UNUSED(state);
+
+    RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
+    RRDDIM *rd_user = NULL;
+    RRDDIM *rd_system = NULL;
+
+    expect_function_call(rrdset_create_custom);
+    expect_value(rrdset_create_custom, host, localhost);
+    expect_string(rrdset_create_custom, type, "netdata");
+    expect_string(rrdset_create_custom, id, "exporting_main_thread_cpu");
+    expect_value(rrdset_create_custom, name, NULL);
+    expect_string(rrdset_create_custom, family, "exporting");
+    expect_value(rrdset_create_custom, context, NULL);
+    expect_string(rrdset_create_custom, units, "milliseconds/s");
+    expect_string(rrdset_create_custom, plugin, "exporting");
+    expect_value(rrdset_create_custom, module, NULL);
+    expect_value(rrdset_create_custom, priority, 130600);
+    expect_value(rrdset_create_custom, update_every, localhost->rrd_update_every);
+    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
+    will_return(rrdset_create_custom, st_rusage);
+
+    expect_function_calls(rrddim_add_custom, 2);
+    expect_value_count(rrddim_add_custom, st, st_rusage, 2);
+    expect_value_count(rrddim_add_custom, name, NULL, 2);
+    expect_value_count(rrddim_add_custom, multiplier, 1, 2);
+    expect_value_count(rrddim_add_custom, divisor, 1000, 2);
+    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
+
+    __real_create_main_rusage_chart(&st_rusage, &rd_user, &rd_system);
+
+    free(st_rusage);
+}
+
+static void test_send_main_rusage(void **state)
+{
+    UNUSED(state);
+
+    RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
+    st_rusage->counter_done = 1;
+
+    expect_function_call(rrdset_next_usec);
+    expect_value(rrdset_next_usec, st, st_rusage);
+
+    expect_function_calls(rrddim_set_by_pointer, 2);
+    expect_value_count(rrddim_set_by_pointer, st, st_rusage, 2);
+
+    expect_function_call(rrdset_done);
+    expect_value(rrdset_done, st, st_rusage);
+
+    __real_send_main_rusage(st_rusage, NULL, NULL);
+
+    free(st_rusage);
+}
+
+static void test_send_internal_metrics(void **state)
+{
+    UNUSED(state);
+
+    struct instance *instance = calloc(1, sizeof(struct instance));
+    instance->config.name = (const char *)strdupz("test_instance");
+    instance->config.update_every = 2;
+
+    struct stats *stats = &instance->stats;
+
+    stats->st_metrics = calloc(1, sizeof(RRDSET));
+    stats->st_metrics->counter_done = 1;
+    stats->st_bytes = calloc(1, sizeof(RRDSET));
+    stats->st_bytes->counter_done = 1;
+    stats->st_ops = calloc(1, sizeof(RRDSET));
+    stats->st_ops->counter_done = 1;
+    stats->st_rusage = calloc(1, sizeof(RRDSET));
+    stats->st_rusage->counter_done = 1;
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_create_custom);
+    expect_value(rrdset_create_custom, host, localhost);
+    expect_string(rrdset_create_custom, type, "netdata");
+    expect_string(rrdset_create_custom, id, "exporting_test_instance_metrics");
+    expect_value(rrdset_create_custom, name, NULL);
+    expect_string(rrdset_create_custom, family, "exporting_test_instance");
+    expect_value(rrdset_create_custom, context, NULL);
+    expect_string(rrdset_create_custom, units, "metrics");
+    expect_string(rrdset_create_custom, plugin, "exporting");
+    expect_value(rrdset_create_custom, module, NULL);
+    expect_value(rrdset_create_custom, priority, 130610);
+    expect_value(rrdset_create_custom, update_every, 2);
+    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
+    will_return(rrdset_create_custom, stats->st_metrics);
+
+    expect_function_calls(rrddim_add_custom, 3);
+    expect_value_count(rrddim_add_custom, st, stats->st_metrics, 3);
+    expect_value_count(rrddim_add_custom, name, NULL, 3);
+    expect_value_count(rrddim_add_custom, multiplier, 1, 3);
+    expect_value_count(rrddim_add_custom, divisor, 1, 3);
+    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 3);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_create_custom);
+    expect_value(rrdset_create_custom, host, localhost);
+    expect_string(rrdset_create_custom, type, "netdata");
+    expect_string(rrdset_create_custom, id, "exporting_test_instance_bytes");
+    expect_value(rrdset_create_custom, name, NULL);
+    expect_string(rrdset_create_custom, family, "exporting_test_instance");
+    expect_value(rrdset_create_custom, context, NULL);
+    expect_string(rrdset_create_custom, units, "KiB");
+    expect_string(rrdset_create_custom, plugin, "exporting");
+    expect_value(rrdset_create_custom, module, NULL);
+    expect_value(rrdset_create_custom, priority, 130620);
+    expect_value(rrdset_create_custom, update_every, 2);
+    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_AREA);
+    will_return(rrdset_create_custom, stats->st_bytes);
+
+    expect_function_calls(rrddim_add_custom, 4);
+    expect_value_count(rrddim_add_custom, st, stats->st_bytes, 4);
+    expect_value_count(rrddim_add_custom, name, NULL, 4);
+    expect_value_count(rrddim_add_custom, multiplier, 1, 4);
+    expect_value_count(rrddim_add_custom, divisor, 1024, 4);
+    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 4);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_create_custom);
+    expect_value(rrdset_create_custom, host, localhost);
+    expect_string(rrdset_create_custom, type, "netdata");
+    expect_string(rrdset_create_custom, id, "exporting_test_instance_ops");
+    expect_value(rrdset_create_custom, name, NULL);
+    expect_string(rrdset_create_custom, family, "exporting_test_instance");
+    expect_value(rrdset_create_custom, context, NULL);
+    expect_string(rrdset_create_custom, units, "operations");
+    expect_string(rrdset_create_custom, plugin, "exporting");
+    expect_value(rrdset_create_custom, module, NULL);
+    expect_value(rrdset_create_custom, priority, 130630);
+    expect_value(rrdset_create_custom, update_every, 2);
+    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
+    will_return(rrdset_create_custom, stats->st_ops);
+
+    expect_function_calls(rrddim_add_custom, 5);
+    expect_value_count(rrddim_add_custom, st, stats->st_ops, 5);
+    expect_value_count(rrddim_add_custom, name, NULL, 5);
+    expect_value_count(rrddim_add_custom, multiplier, 1, 5);
+    expect_value_count(rrddim_add_custom, divisor, 1, 5);
+    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 5);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_create_custom);
+    expect_value(rrdset_create_custom, host, localhost);
+    expect_string(rrdset_create_custom, type, "netdata");
+    expect_string(rrdset_create_custom, id, "exporting_test_instance_thread_cpu");
+    expect_value(rrdset_create_custom, name, NULL);
+    expect_string(rrdset_create_custom, family, "exporting_test_instance");
+    expect_value(rrdset_create_custom, context, NULL);
+    expect_string(rrdset_create_custom, units, "milliseconds/s");
+    expect_string(rrdset_create_custom, plugin, "exporting");
+    expect_value(rrdset_create_custom, module, NULL);
+    expect_value(rrdset_create_custom, priority, 130640);
+    expect_value(rrdset_create_custom, update_every, 2);
+    expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
+    will_return(rrdset_create_custom, stats->st_rusage);
+
+    expect_function_calls(rrddim_add_custom, 2);
+    expect_value_count(rrddim_add_custom, st, stats->st_rusage, 2);
+    expect_value_count(rrddim_add_custom, name, NULL, 2);
+    expect_value_count(rrddim_add_custom, multiplier, 1, 2);
+    expect_value_count(rrddim_add_custom, divisor, 1000, 2);
+    expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_next_usec);
+    expect_value(rrdset_next_usec, st, stats->st_metrics);
+
+    expect_function_calls(rrddim_set_by_pointer, 3);
+    expect_value_count(rrddim_set_by_pointer, st, stats->st_metrics, 3);
+
+    expect_function_call(rrdset_done);
+    expect_value(rrdset_done, st, stats->st_metrics);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_next_usec);
+    expect_value(rrdset_next_usec, st, stats->st_bytes);
+
+    expect_function_calls(rrddim_set_by_pointer, 4);
+    expect_value_count(rrddim_set_by_pointer, st, stats->st_bytes, 4);
+
+    expect_function_call(rrdset_done);
+    expect_value(rrdset_done, st, stats->st_bytes);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_next_usec);
+    expect_value(rrdset_next_usec, st, stats->st_ops);
+
+    expect_function_calls(rrddim_set_by_pointer, 5);
+    expect_value_count(rrddim_set_by_pointer, st, stats->st_ops, 5);
+
+    expect_function_call(rrdset_done);
+    expect_value(rrdset_done, st, stats->st_ops);
+
+    // ------------------------------------------------------------------------
+
+    expect_function_call(rrdset_next_usec);
+    expect_value(rrdset_next_usec, st, stats->st_rusage);
+
+    expect_function_calls(rrddim_set_by_pointer, 2);
+    expect_value_count(rrddim_set_by_pointer, st, stats->st_rusage, 2);
+
+    expect_function_call(rrdset_done);
+    expect_value(rrdset_done, st, stats->st_rusage);
+
+    // ------------------------------------------------------------------------
+
+    __real_send_internal_metrics(instance);
+
+    free(stats->st_metrics);
+    free(stats->st_bytes);
+    free(stats->st_ops);
+    free(stats->st_rusage);
+    free((void *)instance->config.name);
+    free(instance);
+}
+
+static void test_can_send_rrdset(void **state)
+{
+    (void)*state;
+
+    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 1);
+
+    rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
+    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+    rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
+
+    // TODO: test with a denying simple pattern
+
+    rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
+    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+    rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
+
+    localhost->rrdset_root->rrd_memory_mode = RRD_MEMORY_MODE_NONE;
+    prometheus_exporter_instance->config.options |= EXPORTING_SOURCE_DATA_AVERAGE;
+    assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+}
+
+static void test_prometheus_name_copy(void **state)
+{
+    (void)*state;
+
+    char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+    assert_int_equal(prometheus_name_copy(destination_name, "test-name", PROMETHEUS_ELEMENT_MAX), 9);
+
+    assert_string_equal(destination_name, "test_name");
+}
+
+static void test_prometheus_label_copy(void **state)
+{
+    (void)*state;
+
+    char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+    assert_int_equal(prometheus_label_copy(destination_name, "test\"\\\nlabel", PROMETHEUS_ELEMENT_MAX), 15);
+
+    assert_string_equal(destination_name, "test\\\"\\\\\\\nlabel");
+}
+
+static void test_prometheus_units_copy(void **state)
+{
+    (void)*state;
+
+    char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+    assert_string_equal(prometheus_units_copy(destination_name, "test-units", PROMETHEUS_ELEMENT_MAX, 0), "_test_units");
+    assert_string_equal(destination_name, "_test_units");
+
+    assert_string_equal(prometheus_units_copy(destination_name, "%", PROMETHEUS_ELEMENT_MAX, 0), "_percent");
+    assert_string_equal(prometheus_units_copy(destination_name, "test-units/s", PROMETHEUS_ELEMENT_MAX, 0), "_test_units_persec");
+
+    assert_string_equal(prometheus_units_copy(destination_name, "KiB", PROMETHEUS_ELEMENT_MAX, 1), "_KB");
+}
+
+static void test_format_host_labels_prometheus(void **state)
+{
+    struct engine *engine = *state;
+    struct instance *instance = engine->instance_root;
+
+    instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+    instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+    format_host_labels_prometheus(instance, localhost);
+    assert_string_equal(buffer_tostring(instance->labels), "key1=\"netdata\",key2=\"value2\"");
+}
+
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
+{
+    (void)state;
+
+    BUFFER *buffer = buffer_create(0);
+
+    localhost->hostname = strdupz("test_hostname");
+    localhost->rrdset_root->family = strdupz("test_family");
+    localhost->rrdset_root->context = strdupz("test_context");
+
+    expect_function_call(__wrap_now_realtime_sec);
+    will_return(__wrap_now_realtime_sec, 2);
+
+    expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+    will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+    rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, buffer, "test_server", "test_prefix", 0, 0);
+
+    assert_string_equal(
+        buffer_tostring(buffer),
+        "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+        "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
+        "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
+        "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\"} 690565856.0000000\n");
+
+    buffer_flush(buffer);
+
+    expect_function_call(__wrap_now_realtime_sec);
+    will_return(__wrap_now_realtime_sec, 2);
+
+    expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+    will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+    rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+        localhost, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES);
+
+    assert_string_equal(
+        buffer_tostring(buffer),
+        "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+        "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
+        "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
+        "# COMMENT TYPE test_prefix_test_context gauge\n"
+        "test_prefix_test_context{chart=\"chart_name\",family=\"test_family\",dimension=\"dimension_name\"} 690565856.0000000\n");
+
+    buffer_flush(buffer);
+
+    expect_function_call(__wrap_now_realtime_sec);
+    will_return(__wrap_now_realtime_sec, 2);
+
+    expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+    will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+    rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, buffer, "test_server", "test_prefix", 0, 0);
+
+    assert_string_equal(
+        buffer_tostring(buffer),
+        "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+        "netdata_host_tags_info{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
+        "netdata_host_tags{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
+        "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\",instance=\"test_hostname\"} 690565856.0000000\n");
+
+    free(localhost->rrdset_root->context);
+    free(localhost->rrdset_root->family);
+    free(localhost->hostname);
+    buffer_free(buffer);
+}
+
 #if ENABLE_PROMETHEUS_REMOTE_WRITE
 static void test_init_prometheus_remote_write_instance(void **state)
 {
@@ -1014,6 +1399,7 @@ static void test_aws_kinesis_connector_worker(void **state)
 {
     struct engine *engine = *state;
     struct instance *instance = engine->instance_root;
+    struct stats *stats = &instance->stats;
     BUFFER *buffer = instance->buffer;
 
     __real_mark_scheduled_instances(engine);
@@ -1059,8 +1445,25 @@ static void test_aws_kinesis_connector_worker(void **state)
     expect_not_value(__wrap_kinesis_get_result, lost_bytes, NULL);
     will_return(__wrap_kinesis_get_result, 0);
 
+    expect_function_call(__wrap_send_internal_metrics);
+    expect_value(__wrap_send_internal_metrics, instance, instance);
+    will_return(__wrap_send_internal_metrics, 0);
+
     aws_kinesis_connector_worker(instance);
 
+    assert_int_equal(stats->buffered_metrics, 0);
+    assert_int_equal(stats->buffered_bytes, 84);
+    assert_int_equal(stats->received_bytes, 0);
+    assert_int_equal(stats->sent_bytes, 84);
+    assert_int_equal(stats->sent_metrics, 1);
+    assert_int_equal(stats->lost_metrics, 0);
+    assert_int_equal(stats->receptions, 1);
+    assert_int_equal(stats->transmission_successes, 1);
+    assert_int_equal(stats->transmission_failures, 0);
+    assert_int_equal(stats->data_lost_events, 0);
+    assert_int_equal(stats->lost_bytes, 0);
+    assert_int_equal(stats->reconnects, 0);
+
     free(connector_specific_config->stream_name);
     free(connector_specific_config->auth_key_id);
     free(connector_specific_config->secure_key);
@@ -1170,7 +1573,7 @@ static void test_format_batch_mongodb(void **state)
     BUFFER *buffer = buffer_create(0);
     buffer_sprintf(buffer, "{ \"metric\": \"test_metric\" }\n");
     instance->buffer = buffer;
-    stats->chart_buffered_metrics = 1;
+    stats->buffered_metrics = 1;
 
     assert_int_equal(format_batch_mongodb(instance), 0);
 
@@ -1221,6 +1624,10 @@ static void test_mongodb_connector_worker(void **state)
     expect_not_value(__wrap_mongoc_collection_insert_many, error, NULL);
     will_return(__wrap_mongoc_collection_insert_many, true);
 
+    expect_function_call(__wrap_send_internal_metrics);
+    expect_value(__wrap_send_internal_metrics, instance, instance);
+    will_return(__wrap_send_internal_metrics, 0);
+
     mongodb_connector_worker(instance);
 
     assert_ptr_equal(connector_specific_data->first_buffer->insert, NULL);
@@ -1228,11 +1635,18 @@ static void test_mongodb_connector_worker(void **state)
     assert_ptr_equal(connector_specific_data->first_buffer, connector_specific_data->first_buffer->next);
 
     struct stats *stats = &instance->stats;
-    assert_int_equal(stats->chart_sent_bytes, 60);
-    assert_int_equal(stats->chart_transmission_successes, 1);
-    assert_int_equal(stats->chart_receptions, 1);
-    assert_int_equal(stats->chart_sent_bytes, 60);
-    assert_int_equal(stats->chart_sent_metrics, 0);
+    assert_int_equal(stats->buffered_metrics, 0);
+    assert_int_equal(stats->buffered_bytes, 0);
+    assert_int_equal(stats->received_bytes, 0);
+    assert_int_equal(stats->sent_bytes, 30);
+    assert_int_equal(stats->sent_metrics, 1);
+    assert_int_equal(stats->lost_metrics, 0);
+    assert_int_equal(stats->receptions, 1);
+    assert_int_equal(stats->transmission_successes, 1);
+    assert_int_equal(stats->transmission_failures, 0);
+    assert_int_equal(stats->data_lost_events, 0);
+    assert_int_equal(stats->lost_bytes, 0);
+    assert_int_equal(stats->reconnects, 0);
 
     free(connector_specific_config->database);
     free(connector_specific_config->collection);
@@ -1311,6 +1725,27 @@ int main(void)
     int test_res = cmocka_run_group_tests_name("exporting_engine", tests, NULL, NULL) +
                    cmocka_run_group_tests_name("labels_in_exporting_engine", label_tests, NULL, NULL);
 
+    const struct CMUnitTest internal_metrics_tests[] = {
+        cmocka_unit_test(test_create_main_rusage_chart),
+        cmocka_unit_test(test_send_main_rusage),
+        cmocka_unit_test(test_send_internal_metrics),
+    };
+
+    test_res += cmocka_run_group_tests_name("internal_metrics", internal_metrics_tests, NULL, NULL);
+
+    const struct CMUnitTest prometheus_web_api_tests[] = {
+        cmocka_unit_test_setup_teardown(test_can_send_rrdset, setup_prometheus, teardown_prometheus),
+        cmocka_unit_test_setup_teardown(test_prometheus_name_copy, setup_prometheus, teardown_prometheus),
+        cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus),
+        cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus),
+        cmocka_unit_test_setup_teardown(
+            test_format_host_labels_prometheus, setup_configured_engine, teardown_configured_engine),
+        cmocka_unit_test_setup_teardown(
+            rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus),
+    };
+
+    test_res += cmocka_run_group_tests_name("prometheus_web_api", prometheus_web_api_tests, NULL, NULL);
+
 #if ENABLE_PROMETHEUS_REMOTE_WRITE
     const struct CMUnitTest prometheus_remote_write_tests[] = {
         cmocka_unit_test_setup_teardown(
diff --git a/exporting/tests/test_exporting_engine.h b/exporting/tests/test_exporting_engine.h
index 519e7ae17d..6848eb267c 100644
--- a/exporting/tests/test_exporting_engine.h
+++ b/exporting/tests/test_exporting_engine.h
@@ -91,7 +91,14 @@ int __wrap_prepare_buffers(struct engine *engine);
 
 int __wrap_notify_workers(struct engine *engine);
 
-int __wrap_send_internal_metrics(struct engine *engine);
+void __real_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+
+void __real_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+
+int __real_send_internal_metrics(struct instance *instance);
+int __wrap_send_internal_metrics(struct instance *instance);
 
 int __real_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
 int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
@@ -166,6 +173,8 @@ int setup_rrdhost();
 int teardown_rrdhost();
 int setup_initialized_engine(void **state);
 int teardown_initialized_engine(void **state);
+int setup_prometheus(void **state);
+int teardown_prometheus(void **state);
 
 void init_connectors_in_tests(struct engine *engine);
 
diff --git a/health/REFERENCE.md b/health/REFERENCE.md
index d00afa8c60..9a53ce6b30 100644
--- a/health/REFERENCE.md
+++ b/health/REFERENCE.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Health configuration reference"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/health/REFERENCE.md
 ---
 -->
@@ -136,7 +137,7 @@ If you create a template using the `disk.io` context, it will apply an alarm to
 The alarm or template will be used only if the operating system of the host matches this list specified in `os`. The
 value is a space-separated list.
 
-The following example enables the entity on Linux, FreeBSD, and MacOS, but no other operating systems.
+The following example enables the entity on Linux, FreeBSD, and macOS, but no other operating systems.
 
 ```yaml
 os: linux freebsd macos
diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in
index 8f0e14a27d..d580f38e7e 100755
--- a/health/notifications/alarm-notify.sh.in
+++ b/health/notifications/alarm-notify.sh.in
@@ -35,6 +35,7 @@
 #  - Microsoft Team notification by @tioumen
 #  - RocketChat notifications by @Hermsi1337 #3777
 #  - Google Hangouts Chat notifications by @EnzoAkira and @hendrikhofstadt
+#  - Dynatrace Event by @illumine
 
 # -----------------------------------------------------------------------------
 # testing notifications
@@ -170,6 +171,7 @@ awssns
 rocketchat
 sms
 hangouts
+dynatrace
 "
 
 # -----------------------------------------------------------------------------
@@ -366,6 +368,16 @@ IRC_NETWORK=
 # hangouts configs
 declare -A HANGOUTS_WEBHOOK_URI
 
+# dynatrace configs
+DYNATRACE_SPACE=
+DYNATRACE_SERVER=
+DYNATRACE_TOKEN=
+DYNATRACE_TAG_VALUE=
+DYNATRACE_ANNOTATION_TYPE=
+DYNATRACE_EVENT=
+SEND_DYNATRACE=
+
+
 # load the stock and user configuration files
 # these will overwrite the variables above
 
@@ -503,6 +515,14 @@ filter_recipient_by_criticality() {
 #shellcheck disable=SC2153
 { [ -z "${FLEEP_SERVER}" ] || [ -z "${FLEEP_SENDER}" ]; } && SEND_FLEEP="NO"
 
+# check dynatrace
+{ [ -z "${DYNATRACE_SPACE}" ] ||
+  [ -z "${DYNATRACE_SERVER}" ] ||
+  [ -z "${DYNATRACE_TOKEN}" ] ||
+  [ -z "${DYNATRACE_TAG_VALUE}" ] ||
+  [ -z "${DYNATRACE_EVENT}" ]; } && SEND_DYNATRACE="NO"
+
+
 if [ "${SEND_PUSHOVER}" = "YES" ] ||
 	[ "${SEND_SLACK}" = "YES" ] ||
 	[ "${SEND_ROCKETCHAT}" = "YES" ] ||
@@ -521,7 +541,8 @@ if [ "${SEND_PUSHOVER}" = "YES" ] ||
 	[ "${SEND_PROWL}" = "YES" ] ||
 	[ "${SEND_HANGOUTS}" = "YES" ] ||
 	[ "${SEND_CUSTOM}" = "YES" ] ||
-	[ "${SEND_MSTEAM}" = "YES" ]; then
+	[ "${SEND_MSTEAM}" = "YES" ] ||
+	[ "${SEND_DYNATRACE}" = "YES" ]; then
 	# if we need curl, check for the curl command
 	if [ -z "${curl}" ]; then
 		curl="$(command -v curl 2>/dev/null)"
@@ -547,6 +568,7 @@ if [ "${SEND_PUSHOVER}" = "YES" ] ||
 		SEND_PROWL="NO"
 		SEND_HANGOUTS="NO"
 		SEND_CUSTOM="NO"
+		SEND_DYNATRACE="NO"
 	fi
 fi
 
@@ -673,7 +695,9 @@ for method in "${SEND_EMAIL}" \
 	"${SEND_AWSSNS}" \
 	"${SEND_SYSLOG}" \
 	"${SEND_SMS}" \
-	"${SEND_MSTEAM}"; do
+	"${SEND_MSTEAM}" \
+	"${SEND_DYNATRACE}"; do
+
 	if [ "${method}" == "YES" ]; then
 		proceed=1
 		break
@@ -1887,6 +1911,53 @@ EOF
 
 	return 1
 }
+# -----------------------------------------------------------------------------
+# Dynatrace sender
+send_dynatrace() {
+  [ "${SEND_DYNATRACE}" != "YES" ] && return 1
+
+  local dynatrace_url="${DYNATRACE_SERVER}/e/${DYNATRACE_SPACE}/api/v1/events"
+  local description="Netdata notification for: ${host} ${chart}.${name} is ${status}"
+  local payload=""
+  
+  payload=$(cat <<EOF
+{
+  "title": "Netdata Alarm from ${host}",
+  "source" : "${DYNATRACE_ANNOTATION_TYPE}",
+  "description" : "${description}",
+  "eventType": "${DYNATRACE_EVENT}",
+   "attachRules":{
+     "tagRule":[{
+        "meTypes":["HOST"],
+        "tags":["${DYNATRACE_TAG_VALUE}"]
+     }]
+  },
+  "customProperties":{
+    "description": "${description}"
+  }
+}
+EOF
+)
+
+  # echo ${payload}
+
+  httpcode=$(docurl -X POST -H "Authorization: Api-token ${DYNATRACE_TOKEN}" -H "Content-Type: application/json" -d "${payload}" "${dynatrace_url}")
+  ret=$?
+
+
+  if [ ${ret} -eq 0 ]; then
+     if [ "${httpcode}" = "200" ]; then
+        info "sent ${DYNATRACE_EVENT} to ${DYNATRACE_SERVER}"
+        return 0
+     else
+        warning "Dynatrace ${DYNATRACE_SERVER} responded with HTTP code ${httpcode}; notification for: ${host} ${chart}.${name} is ${status} was not sent!"
+        return 1
+     fi
+  else
+     error "failed to send ${DYNATRACE_EVENT} notification for: ${host} ${chart}.${name} is ${status} to ${DYNATRACE_SERVER} with error code ${ret}."
+     return 1
+  fi
+}
 
 # -----------------------------------------------------------------------------
 # prepare the content of the notification
@@ -2391,6 +2462,12 @@ fi
 
 SENT_EMAIL=$?
 
+# -----------------------------------------------------------------------------
+# send the EVENT to Dynatrace
+send_dynatrace "${host}" "${chart}" "${name}" "${status}"
+SENT_DYNATRACE=$?
+
+
 # -----------------------------------------------------------------------------
 # let netdata know
 for state in "${SENT_EMAIL}" \
@@ -2416,7 +2493,8 @@ for state in "${SENT_EMAIL}" \
 	"${SENT_AWSSNS}" \
 	"${SENT_SYSLOG}" \
 	"${SENT_SMS}" \
-	"${SENT_MSTEAM}"; do
+	"${SENT_MSTEAM}" \
+	"${SENT_DYNATRACE}"; do
 	if [ "${state}" -eq 0 ]; then
 		# we sent something
 		exit 0
diff --git a/health/notifications/dynatrace/Makefile.inc b/health/notifications/dynatrace/Makefile.inc
new file mode 100644
index 0000000000..a2ae623fbc
--- /dev/null
+++ b/health/notifications/dynatrace/Makefile.inc
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_noinst_DATA += \
+    dynatrace/README.md \
+    dynatrace/Makefile.inc \
+    $(NULL)
+
diff --git a/health/notifications/dynatrace/README.md b/health/notifications/dynatrace/README.md
new file mode 100644
index 0000000000..3532968f05
--- /dev/null
+++ b/health/notifications/dynatrace/README.md
@@ -0,0 +1,36 @@
+<!--
+---
+title: "Dynatrace"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/dynatrace/README.md
+---
+-->
+
+# Dynatrace
+
+Dynatrace allows you to receive notifications using their Events REST API.
+
+See [the Dynatrace documentation](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/environment-api/events/post-event/) about POSTing an event in the Events API for more details.
+
+
+
+You need:
+
+1.  Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts.
+The Dynatrace server must be specified with the protocol prefix (`http://` or `https://`). For example: `https://monitor.example.com`
+This is a required parameter.
+2.  API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API.
+Generate a Dynatrace API authentication token. On your Dynatrace server, go to **Settings** --> **Integration** --> **Dynatrace API** --> **Generate token**.
+See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.
+This is a required parameter.
+3.  API Space. This is the URL part of the page you have access to in order to generate the API Token. For example, if the URL shown when you generate the API Token is:
+https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all
+then the space is _2a93fe0e-4cd5-469a-9d0d-1a064235cfce_
+This is a required parameter.
+4. Generate a Server Tag. On your Dynatrace Server, go to **Settings** --> **Tags** --> **Manually applied tags** and create the Tag.
+The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.
+This is a required parameter.
+5. Specify the Dynatrace event. This can be one of `CUSTOM_INFO`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, and `CUSTOM_DEPLOYMENT`. 
+The default value is `CUSTOM_INFO`.
+This is a required parameter.
+6. Specify the annotation type. This is the source of the Dynatrace event. Use any value that fits, for example, 
+_Netdata Alarm_, which is also the default value.
diff --git a/health/notifications/health_alarm_notify.conf b/health/notifications/health_alarm_notify.conf
index 5540dfc134..7baffb68b1 100755
--- a/health/notifications/health_alarm_notify.conf
+++ b/health/notifications/health_alarm_notify.conf
@@ -229,6 +229,43 @@ DEFAULT_RECIPIENT_EMAIL="root"
 # to not send HTML but Plain Text only emails.
 #EMAIL_PLAINTEXT_ONLY="YES"
 
+#------------------------------------------------------------------------------
+# Dynatrace global notification options
+#------------------------------------------------------------------------------
+# enable/disable sending Dynatrace notifications
+SEND_DYNATRACE="YES"
+
+# The Dynatrace server with protocol prefix (http:// or https://), example https://monitor.illumineit.com
+# Required
+DYNATRACE_SERVER=""
+
+# Generate a Dynatrace API authentication token
+# Read https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/ 
+# On Dynatrace server goto Settings --> Integration --> Dynatrace API --> Generate token
+# Required
+DYNATRACE_TOKEN=""
+
+# The Dynatrace space: take it from the Dynatrace URL in your browser when you create the token
+# Required
+DYNATRACE_SPACE=""
+
+# Generate a Server Tag. On the Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag
+# The NetData alarm will be sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag 
+# you created.
+# Required
+DYNATRACE_TAG_VALUE=""
+
+# Change this to what you want
+DYNATRACE_ANNOTATION_TYPE="NetData Alarm"
+
+# This can be CUSTOM_INFO, CUSTOM_ANNOTATION, CUSTOM_CONFIGURATION, CUSTOM_DEPLOYMENT
+# Applying default value
+# Required
+DYNATRACE_EVENT="CUSTOM_INFO"
+
+
+DEFAULT_RECIPIENT_DYNATRACE=""
+
 #------------------------------------------------------------------------------
 # hangouts (google hangouts chat) global notification options
 
@@ -873,6 +910,7 @@ role_recipients_msteam[sysadmin]="${DEFAULT_RECIPIENT_MSTEAM}"
 
 role_recipients_rocketchat[sysadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
 
+role_recipients_dynatrace[sysadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
 # -----------------------------------------------------------------------------
 # DNS related alarms
 
@@ -922,6 +960,7 @@ role_recipients_rocketchat[domainadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
 
 role_recipients_sms[domainadmin]="${DEFAULT_RECIPIENT_SMS}"
 
+role_recipients_dynatrace[domainadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
 # -----------------------------------------------------------------------------
 # database servers alarms
 # mysql, redis, memcached, postgres, etc
@@ -972,6 +1011,7 @@ role_recipients_rocketchat[dba]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
 
 role_recipients_sms[dba]="${DEFAULT_RECIPIENT_SMS}"
 
+role_recipients_dynatrace[dba]="${DEFAULT_RECIPIENT_DYNATRACE}"
 # -----------------------------------------------------------------------------
 # web servers alarms
 # apache, nginx, lighttpd, etc
@@ -1022,6 +1062,7 @@ role_recipients_rocketchat[webmaster]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
 
 role_recipients_sms[webmaster]="${DEFAULT_RECIPIENT_SMS}"
 
+role_recipients_dynatrace[webmaster]="${DEFAULT_RECIPIENT_DYNATRACE}"
 # -----------------------------------------------------------------------------
 # proxy servers alarms
 # squid, etc
@@ -1072,7 +1113,7 @@ role_recipients_rocketchat[proxyadmin]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
 
 role_recipients_sms[proxyadmin]="${DEFAULT_RECIPIENT_SMS}"
 
-
+role_recipients_dynatrace[proxyadmin]="${DEFAULT_RECIPIENT_DYNATRACE}"
 # -----------------------------------------------------------------------------
 # peripheral devices
 # UPS, photovoltaics, etc
@@ -1121,3 +1162,4 @@ role_recipients_rocketchat[sitemgr]="${DEFAULT_RECIPIENT_ROCKETCHAT}"
 
 role_recipients_sms[sitemgr]="${DEFAULT_RECIPIENT_SMS}"
 
+role_recipients_dynatrace[sitemgr]="${DEFAULT_RECIPIENT_DYNATRACE}"
\ No newline at end of file
diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c
index 6c008234ad..39a57e5ff5 100644
--- a/libnetdata/config/appconfig.c
+++ b/libnetdata/config/appconfig.c
@@ -547,7 +547,8 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons
             s++;
 
             if (is_exporter_config) {
-                global_exporting_section = !(strcmp(s, CONFIG_SECTION_EXPORTING));
+                global_exporting_section =
+                    !(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS));
                 if (unlikely(!global_exporting_section)) {
                     int rc;
                     rc = is_valid_connector(s, 0);
diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h
index a0a3bd6329..8c2ce09a97 100644
--- a/libnetdata/config/appconfig.h
+++ b/libnetdata/config/appconfig.h
@@ -82,18 +82,19 @@
 
 #define CONFIG_FILENAME "netdata.conf"
 
-#define CONFIG_SECTION_GLOBAL    "global"
-#define CONFIG_SECTION_WEB       "web"
-#define CONFIG_SECTION_STATSD    "statsd"
-#define CONFIG_SECTION_PLUGINS   "plugins"
-#define CONFIG_SECTION_CLOUD     "cloud"
-#define CONFIG_SECTION_REGISTRY  "registry"
-#define CONFIG_SECTION_HEALTH    "health"
-#define CONFIG_SECTION_BACKEND   "backend"
-#define CONFIG_SECTION_STREAM    "stream"
-#define CONFIG_SECTION_EXPORTING "exporting:global"
-#define CONFIG_SECTION_HOST_LABEL   "host labels"
-#define EXPORTING_CONF           "exporting.conf"
+#define CONFIG_SECTION_GLOBAL     "global"
+#define CONFIG_SECTION_WEB        "web"
+#define CONFIG_SECTION_STATSD     "statsd"
+#define CONFIG_SECTION_PLUGINS    "plugins"
+#define CONFIG_SECTION_CLOUD      "cloud"
+#define CONFIG_SECTION_REGISTRY   "registry"
+#define CONFIG_SECTION_HEALTH     "health"
+#define CONFIG_SECTION_BACKEND    "backend"
+#define CONFIG_SECTION_STREAM     "stream"
+#define CONFIG_SECTION_EXPORTING  "exporting:global"
+#define CONFIG_SECTION_PROMETHEUS "prometheus:exporter"
+#define CONFIG_SECTION_HOST_LABEL "host labels"
+#define EXPORTING_CONF            "exporting.conf"
 
 // these are used to limit the configuration names and values lengths
 // they are not enforced by config.c functions (they will strdup() all strings, no matter of their length)
diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh
index ae07e6642f..26a17192e1 100755
--- a/packaging/installer/install-required-packages.sh
+++ b/packaging/installer/install-required-packages.sh
@@ -192,7 +192,7 @@ get_os_release() {
   eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" "${os_release_file}")"
   for x in "${ID}" ${ID_LIKE}; do
     case "${x,,}" in
-      alpine | arch | centos | debian | fedora | gentoo | sabayon | rhel | ubuntu | suse | opensuse-leap | sles | clear-linux-os)
+      alpine | arch | centos | clear-linux-os | debian | fedora | gentoo | manjaro | opensuse-leap | rhel | sabayon | sles | suse | ubuntu)
         distribution="${x}"
         version="${VERSION_ID}"
         codename="${VERSION}"
@@ -1047,7 +1047,9 @@ declare -A pkg_zip=(
 )
 
 validate_package_trees() {
-  validate_tree_${tree}
+  if type -t validate_tree_${tree} > /dev/null; then
+    validate_tree_${tree}
+  fi
 }
 
 validate_installed_package() {
diff --git a/packaging/installer/methods/macos.md b/packaging/installer/methods/macos.md
index 337e1ef596..d51ffccf23 100644
--- a/packaging/installer/methods/macos.md
+++ b/packaging/installer/methods/macos.md
@@ -75,7 +75,7 @@ sudo ./netdata-installer.sh --install /usr/local
 > Your Netdata configuration directory will be at `/usr/local/netdata/`, and your stock configuration directory will
 > be at **`/usr/local/lib/netdata/conf.d/`.**
 >
-> The installer will also install a startup plist to start Netdata when your Mac boots.
+> The installer will also install a startup plist to start Netdata when your macOS system boots.
 
 ## What's next?
 
diff --git a/packaging/maintainers/README.md b/packaging/maintainers/README.md
index 356e614437..5bea502e79 100644
--- a/packaging/maintainers/README.md
+++ b/packaging/maintainers/README.md
@@ -1,6 +1,7 @@
 <!--
 ---
 title: "Package Maintainers"
+date: 2020-03-31
 custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/maintainers/README.md
 ---
 -->
@@ -36,11 +37,11 @@ This page tracks the package maintainers for Netdata, for various operating syst
 
 ---
 
-## MacOS
+## macOS
 
 | System | URL | Core Developer | Package Maintainer
 |:-:|:-:|:-:|:-:|
-| MacOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
+| macOS Homebrew Formula|[link](https://github.com/Homebrew/homebrew-core/blob/master/Formula/netdata.rb)|@vlvkobal|@rickard-von-essen
 
 ---