diff --git a/aclk/aclk.c b/aclk/aclk.c index b7b6a3d81c..4e21b87983 100644 --- a/aclk/aclk.c +++ b/aclk/aclk.c @@ -72,7 +72,7 @@ static void aclk_ssl_keylog_cb(const SSL *ssl, const char *line) if (!ssl_log_file) ssl_log_file = fopen(ssl_log_filename, "a"); if (!ssl_log_file) { - error("Couldn't open ssl_log file (%s) for append.", ssl_log_filename); + netdata_log_error("Couldn't open ssl_log file (%s) for append.", ssl_log_filename); return; } fputs(line, ssl_log_file); @@ -107,14 +107,14 @@ static int load_private_key() long bytes_read; char *private_key = read_by_filename(filename, &bytes_read); if (!private_key) { - error("Claimed agent cannot establish ACLK - unable to load private key '%s' failed.", filename); + netdata_log_error("Claimed agent cannot establish ACLK - unable to load private key '%s' failed.", filename); return 1; } debug(D_ACLK, "Claimed agent loaded private key len=%ld bytes", bytes_read); BIO *key_bio = BIO_new_mem_buf(private_key, -1); if (key_bio==NULL) { - error("Claimed agent cannot establish ACLK - failed to create BIO for key"); + netdata_log_error("Claimed agent cannot establish ACLK - failed to create BIO for key"); goto biofailed; } @@ -125,13 +125,13 @@ static int load_private_key() NULL, NULL); if (!aclk_dctx) { - error("Loading private key (from claiming) failed - no OpenSSL Decoders found"); + netdata_log_error("Loading private key (from claiming) failed - no OpenSSL Decoders found"); goto biofailed; } // this is necesseary to avoid RSA key with wrong size if (!OSSL_DECODER_from_bio(aclk_dctx, key_bio)) { - error("Decoding private key (from claiming) failed - invalid format."); + netdata_log_error("Decoding private key (from claiming) failed - invalid format."); goto biofailed; } #else @@ -145,7 +145,7 @@ static int load_private_key() } char err[512]; ERR_error_string_n(ERR_get_error(), err, sizeof(err)); - error("Claimed agent cannot establish ACLK - cannot create private key: %s", err); + netdata_log_error("Claimed agent cannot establish ACLK - cannot create private key: %s", err); biofailed: freez(private_key); @@ -204,7 +204,7 @@ static int wait_till_agent_claim_ready() // We trap the impossible NULL here to keep the linter happy without using a fatal() in the code. char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL); if (cloud_base_url == NULL) { - error("Do not move the cloud base url out of post_conf_load!!"); + netdata_log_error("Do not move the cloud base url out of post_conf_load!!"); return 1; } @@ -212,7 +212,7 @@ static int wait_till_agent_claim_ready() // TODO make it without malloc/free memset(&url, 0, sizeof(url_t)); if (url_parse(cloud_base_url, &url)) { - error("Agent is claimed but the URL in configuration key \"cloud base url\" is invalid, please fix"); + netdata_log_error("Agent is claimed but the URL in configuration key \"cloud base url\" is invalid, please fix"); url_t_destroy(&url); sleep(5); continue; @@ -243,7 +243,7 @@ void aclk_mqtt_wss_log_cb(mqtt_wss_log_type_t log_type, const char* str) debug(D_ACLK, "%s", str); return; default: - error("Unknown log type from mqtt_wss"); + netdata_log_error("Unknown log type from mqtt_wss"); } } @@ -255,7 +255,7 @@ static void msg_callback(const char *topic, const void *msg, size_t msglen, int debug(D_ACLK, "Got Message From Broker Topic \"%s\" QOS %d", topic, qos); if (aclk_shared_state.mqtt_shutdown_msg_id > 0) { - error("Link is shutting down. Ignoring incoming message."); + netdata_log_error("Link is shutting down. 
Ignoring incoming message."); return; } @@ -277,7 +277,7 @@ static void msg_callback(const char *topic, const void *msg, size_t msglen, int snprintf(filename, FN_MAX_LEN, ACLK_LOG_CONVERSATION_DIR "/%010d-rx-%s.bin", ACLK_GET_CONV_LOG_NEXT(), msgtype); logfd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR ); if(logfd < 0) - error("Error opening ACLK Conversation logfile \"%s\" for RX message.", filename); + netdata_log_error("Error opening ACLK Conversation logfile \"%s\" for RX message.", filename); write(logfd, msg, msglen); close(logfd); #endif @@ -308,7 +308,7 @@ static int read_query_thread_count() threads = MAX(threads, 2); threads = config_get_number(CONFIG_SECTION_CLOUD, "query thread count", threads); if(threads < 1) { - error("You need at least one query thread. Overriding configured setting of \"%d\"", threads); + netdata_log_error("You need at least one query thread. Overriding configured setting of \"%d\"", threads); threads = 1; config_set_number(CONFIG_SECTION_CLOUD, "query thread count", threads); } @@ -365,13 +365,13 @@ static inline void mqtt_connected_actions(mqtt_wss_client client) char *topic = (char*)aclk_get_topic(ACLK_TOPICID_COMMAND); if (!topic) - error("Unable to fetch topic for COMMAND (to subscribe)"); + netdata_log_error("Unable to fetch topic for COMMAND (to subscribe)"); else mqtt_wss_subscribe(client, topic, 1); topic = (char*)aclk_get_topic(ACLK_TOPICID_CMD_NG_V1); if (!topic) - error("Unable to fetch topic for protobuf COMMAND (to subscribe)"); + netdata_log_error("Unable to fetch topic for protobuf COMMAND (to subscribe)"); else mqtt_wss_subscribe(client, topic, 1); @@ -399,7 +399,7 @@ void aclk_graceful_disconnect(mqtt_wss_client client) time_t t = now_monotonic_sec(); while (!mqtt_wss_service(client, 100)) { if (now_monotonic_sec() - t >= 2) { - error("Wasn't able to gracefully shutdown ACLK in time!"); + netdata_log_error("Wasn't able to gracefully shutdown ACLK in time!"); break; } if (aclk_shared_state.mqtt_shutdown_msg_rcvd) { @@ -786,7 +786,7 @@ void *aclk_main(void *ptr) ACLK_PROXY_TYPE proxy_type; aclk_get_proxy(&proxy_type); if (proxy_type == PROXY_TYPE_SOCKS5) { - error("SOCKS5 proxy is not supported by ACLK-NG yet."); + netdata_log_error("SOCKS5 proxy is not supported by ACLK-NG yet."); static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; return NULL; } @@ -811,7 +811,7 @@ void *aclk_main(void *ptr) goto exit; if (!(mqttwss_client = mqtt_wss_new("mqtt_wss", aclk_mqtt_wss_log_cb, msg_callback, puback_callback))) { - error("Couldn't initialize MQTT_WSS network library"); + netdata_log_error("Couldn't initialize MQTT_WSS network library"); goto exit; } @@ -906,7 +906,7 @@ void aclk_host_state_update(RRDHOST *host, int cmd) ret = get_node_id(&host->host_uuid, &node_id); if (ret > 0) { // this means we were not able to check if node_id already present - error("Unable to check for node_id. Ignoring the host state update."); + netdata_log_error("Unable to check for node_id. 
Ignoring the host state update."); return; } if (ret < 0) { diff --git a/aclk/aclk_otp.c b/aclk/aclk_otp.c index a37c0bb9c2..46d0f6213d 100644 --- a/aclk/aclk_otp.c +++ b/aclk/aclk_otp.c @@ -38,7 +38,7 @@ struct auth_data { #define PARSE_ENV_JSON_CHK_TYPE(it, type, name) \ if (json_object_get_type(json_object_iter_peek_value(it)) != type) { \ - error("value of key \"%s\" should be %s", name, #type); \ + netdata_log_error("value of key \"%s\" should be %s", name, #type); \ goto exit; \ } @@ -55,7 +55,7 @@ static int parse_passwd_response(const char *json_str, struct auth_data *auth) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http response of /env endpoint"); + netdata_log_error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -88,26 +88,26 @@ static int parse_passwd_response(const char *json_str, struct auth_data *auth) { PARSE_ENV_JSON_CHK_TYPE(&it, json_type_array, JSON_KEY_TOPICS) if (aclk_generate_topic_cache(json_object_iter_peek_value(&it))) { - error("Failed to generate topic cache!"); + netdata_log_error("Failed to generate topic cache!"); goto exit; } json_object_iter_next(&it); continue; } - error("Unknown key \"%s\" in passwd response payload. Ignoring", json_object_iter_peek_name(&it)); + netdata_log_error("Unknown key \"%s\" in passwd response payload. Ignoring", json_object_iter_peek_name(&it)); json_object_iter_next(&it); } if (!auth->client_id) { - error(JSON_KEY_CLIENTID " is compulsory key in /password response"); + netdata_log_error(JSON_KEY_CLIENTID " is compulsory key in /password response"); goto exit; } if (!auth->passwd) { - error(JSON_KEY_PASS " is compulsory in /password response"); + netdata_log_error(JSON_KEY_PASS " is compulsory in /password response"); goto exit; } if (!auth->username) { - error(JSON_KEY_USER " is compulsory in /password response"); + netdata_log_error(JSON_KEY_USER " is compulsory in /password response"); goto exit; } @@ -126,11 +126,11 @@ exit: static const char *get_json_str_by_path(json_object *json, const char *path) { json_object *ptr; if (json_pointer_get(json, path, &ptr)) { - error("Missing compulsory key \"%s\" in error response", path); + netdata_log_error("Missing compulsory key \"%s\" in error response", path); return NULL; } if (json_object_get_type(ptr) != json_type_string) { - error("Value of Key \"%s\" in error response should be string", path); + netdata_log_error("Value of Key \"%s\" in error response should be string", path); return NULL; } return json_object_get_string(ptr); @@ -147,7 +147,7 @@ static int aclk_parse_otp_error(const char *json_str) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http response of /env endpoint"); + netdata_log_error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -163,7 +163,7 @@ static int aclk_parse_otp_error(const char *json_str) { // optional field if (!json_pointer_get(json, "/" JSON_KEY_ERTRY, &ptr)) { if (json_object_get_type(ptr) != json_type_boolean) { - error("Error response Key " "/" JSON_KEY_ERTRY " should be of boolean type"); + netdata_log_error("Error response Key " "/" JSON_KEY_ERTRY " should be of boolean type"); goto exit; } block_retry = json_object_get_boolean(ptr); @@ -172,7 +172,7 @@ static int aclk_parse_otp_error(const char *json_str) { // optional field if (!json_pointer_get(json, "/" JSON_KEY_EDELAY, &ptr)) { if (json_object_get_type(ptr) != json_type_int) { - error("Error response 
Key " "/" JSON_KEY_EDELAY " should be of integer type"); + netdata_log_error("Error response Key " "/" JSON_KEY_EDELAY " should be of integer type"); goto exit; } backoff = json_object_get_int(ptr); @@ -184,7 +184,7 @@ static int aclk_parse_otp_error(const char *json_str) { if (backoff > 0) aclk_block_until = now_monotonic_sec() + backoff; - error("Cloud returned EC=\"%s\", Msg-Key:\"%s\", Msg:\"%s\", BlockRetry:%s, Backoff:%ds (-1 unset by cloud)", ec, ek, emsg, block_retry > 0 ? "true" : "false", backoff); + netdata_log_error("Cloud returned EC=\"%s\", Msg-Key:\"%s\", Msg:\"%s\", BlockRetry:%s, Backoff:%ds (-1 unset by cloud)", ec, ek, emsg, block_retry > 0 ? "true" : "false", backoff); rc = 0; exit: json_object_put(json); @@ -205,7 +205,7 @@ static int aclk_parse_otp_error(const char *json_str) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http response of /env endpoint"); + netdata_log_error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -236,7 +236,7 @@ static int aclk_parse_otp_error(const char *json_str) { } if (!strcmp(json_object_iter_peek_name(&it), JSON_KEY_EDELAY)) { if (json_object_get_type(json_object_iter_peek_value(&it)) != json_type_int) { - error("value of key " JSON_KEY_EDELAY " should be integer"); + netdata_log_error("value of key " JSON_KEY_EDELAY " should be integer"); goto exit; } @@ -246,7 +246,7 @@ static int aclk_parse_otp_error(const char *json_str) { } if (!strcmp(json_object_iter_peek_name(&it), JSON_KEY_ERTRY)) { if (json_object_get_type(json_object_iter_peek_value(&it)) != json_type_boolean) { - error("value of key " JSON_KEY_ERTRY " should be integer"); + netdata_log_error("value of key " JSON_KEY_ERTRY " should be integer"); goto exit; } @@ -254,7 +254,7 @@ static int aclk_parse_otp_error(const char *json_str) { json_object_iter_next(&it); continue; } - error("Unknown key \"%s\" in error response payload. Ignoring", json_object_iter_peek_name(&it)); + netdata_log_error("Unknown key \"%s\" in error response payload. Ignoring", json_object_iter_peek_name(&it)); json_object_iter_next(&it); } @@ -264,7 +264,7 @@ static int aclk_parse_otp_error(const char *json_str) { if (backoff > 0) aclk_block_until = now_monotonic_sec() + backoff; - error("Cloud returned EC=\"%s\", Msg-Key:\"%s\", Msg:\"%s\", BlockRetry:%s, Backoff:%ds (-1 unset by cloud)", ec, ek, emsg, block_retry > 0 ? "true" : "false", backoff); + netdata_log_error("Cloud returned EC=\"%s\", Msg-Key:\"%s\", Msg:\"%s\", BlockRetry:%s, Backoff:%ds (-1 unset by cloud)", ec, ek, emsg, block_retry > 0 ? 
"true" : "false", backoff); rc = 0; exit: json_object_put(json); @@ -301,7 +301,7 @@ inline static int base64_decode_helper(unsigned char *out, int *outl, const unsi EVP_DecodeFinal(ctx, remaining_data, &remainder); EVP_ENCODE_CTX_free(ctx); if (remainder) { - error("Unexpected data at EVP_DecodeFinal"); + netdata_log_error("Unexpected data at EVP_DecodeFinal"); return 1; } return 0; @@ -322,12 +322,12 @@ int aclk_get_otp_challenge(url_t *target, const char *agent_id, unsigned char ** req.url = (char *)buffer_tostring(url); if (aclk_https_request(&req, &resp)) { - error ("ACLK_OTP Challenge failed"); + netdata_log_error("ACLK_OTP Challenge failed"); buffer_free(url); return 1; } if (resp.http_code != 200) { - error ("ACLK_OTP Challenge HTTP code not 200 OK (got %d)", resp.http_code); + netdata_log_error("ACLK_OTP Challenge HTTP code not 200 OK (got %d)", resp.http_code); buffer_free(url); if (resp.payload_size) aclk_parse_otp_error(resp.payload); @@ -339,32 +339,32 @@ int aclk_get_otp_challenge(url_t *target, const char *agent_id, unsigned char ** json_object *json = json_tokener_parse(resp.payload); if (!json) { - error ("Couldn't parse HTTP GET challenge payload"); + netdata_log_error("Couldn't parse HTTP GET challenge payload"); goto cleanup_resp; } json_object *challenge_json; if (!json_object_object_get_ex(json, "challenge", &challenge_json)) { - error ("No key named \"challenge\" in the returned JSON"); + netdata_log_error("No key named \"challenge\" in the returned JSON"); goto cleanup_json; } if (!json_object_is_type(challenge_json, json_type_string)) { - error ("\"challenge\" is not a string JSON type"); + netdata_log_error("\"challenge\" is not a string JSON type"); goto cleanup_json; } const char *challenge_base64; if (!(challenge_base64 = json_object_get_string(challenge_json))) { - error("Failed to extract challenge from JSON object"); + netdata_log_error("Failed to extract challenge from JSON object"); goto cleanup_json; } if (strlen(challenge_base64) != CHALLENGE_LEN_BASE64) { - error("Received Challenge has unexpected length of %zu (expected %d)", strlen(challenge_base64), CHALLENGE_LEN_BASE64); + netdata_log_error("Received Challenge has unexpected length of %zu (expected %d)", strlen(challenge_base64), CHALLENGE_LEN_BASE64); goto cleanup_json; } *challenge = mallocz((CHALLENGE_LEN_BASE64 / 4) * 3); base64_decode_helper(*challenge, challenge_bytes, (const unsigned char*)challenge_base64, strlen(challenge_base64)); if (*challenge_bytes != CHALLENGE_LEN) { - error("Unexpected challenge length of %d instead of %d", *challenge_bytes, CHALLENGE_LEN); + netdata_log_error("Unexpected challenge length of %d instead of %d", *challenge_bytes, CHALLENGE_LEN); freez(*challenge); *challenge = NULL; goto cleanup_json; @@ -405,11 +405,11 @@ int aclk_send_otp_response(const char *agent_id, const unsigned char *response, req.payload_size = strlen(req.payload); if (aclk_https_request(&req, &resp)) { - error ("ACLK_OTP Password error trying to post result to password"); + netdata_log_error("ACLK_OTP Password error trying to post result to password"); goto cleanup_buffers; } if (resp.http_code != 201) { - error ("ACLK_OTP Password HTTP code not 201 Created (got %d)", resp.http_code); + netdata_log_error("ACLK_OTP Password HTTP code not 201 Created (got %d)", resp.http_code); if (resp.payload_size) aclk_parse_otp_error(resp.payload); goto cleanup_response; @@ -417,7 +417,7 @@ int aclk_send_otp_response(const char *agent_id, const unsigned char *response, netdata_log_info("ACLK_OTP Got Password 
from Cloud"); if (parse_passwd_response(resp.payload, mqtt_auth)){ - error("Error parsing response of password endpoint"); + netdata_log_error("Error parsing response of password endpoint"); goto cleanup_response; } @@ -470,7 +470,7 @@ static int private_decrypt(RSA *p_key, unsigned char * enc_data, int data_len, u { char err[512]; ERR_error_string_n(ERR_get_error(), err, sizeof(err)); - error("Decryption of the challenge failed: %s", err); + netdata_log_error("Decryption of the challenge failed: %s", err); } return result; } @@ -486,13 +486,13 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p char *agent_id = get_agent_claimid(); if (agent_id == NULL) { - error("Agent was not claimed - cannot perform challenge/response"); + netdata_log_error("Agent was not claimed - cannot perform challenge/response"); return 1; } // Get Challenge if (aclk_get_otp_challenge(target, agent_id, &challenge, &challenge_bytes)) { - error("Error getting challenge"); + netdata_log_error("Error getting challenge"); freez(agent_id); return 1; } @@ -501,7 +501,7 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p unsigned char *response_plaintext; int response_plaintext_bytes = private_decrypt(p_key, challenge, challenge_bytes, &response_plaintext); if (response_plaintext_bytes < 0) { - error ("Couldn't decrypt the challenge received"); + netdata_log_error("Couldn't decrypt the challenge received"); freez(response_plaintext); freez(challenge); freez(agent_id); @@ -512,7 +512,7 @@ int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_p // Encode and Send Challenge struct auth_data data = { .client_id = NULL, .passwd = NULL, .username = NULL }; if (aclk_send_otp_response(agent_id, response_plaintext, response_plaintext_bytes, target, &data)) { - error("Error getting response"); + netdata_log_error("Error getting response"); freez(response_plaintext); freez(agent_id); return 1; @@ -549,12 +549,12 @@ static int parse_json_env_transport(json_object *json, aclk_transport_desc_t *tr if (!strcmp(json_object_iter_peek_name(&it), JSON_KEY_TRP_TYPE)) { PARSE_ENV_JSON_CHK_TYPE(&it, json_type_string, JSON_KEY_TRP_TYPE) if (trp->type != ACLK_TRP_UNKNOWN) { - error(JSON_KEY_TRP_TYPE " set already"); + netdata_log_error(JSON_KEY_TRP_TYPE " set already"); goto exit; } trp->type = aclk_transport_type_t_from_str(json_object_get_string(json_object_iter_peek_value(&it))); if (trp->type == ACLK_TRP_UNKNOWN) { - error(JSON_KEY_TRP_TYPE " unknown type \"%s\"", json_object_get_string(json_object_iter_peek_value(&it))); + netdata_log_error(JSON_KEY_TRP_TYPE " unknown type \"%s\"", json_object_get_string(json_object_iter_peek_value(&it))); goto exit; } json_object_iter_next(&it); @@ -564,25 +564,25 @@ static int parse_json_env_transport(json_object *json, aclk_transport_desc_t *tr if (!strcmp(json_object_iter_peek_name(&it), JSON_KEY_TRP_ENDPOINT)) { PARSE_ENV_JSON_CHK_TYPE(&it, json_type_string, JSON_KEY_TRP_ENDPOINT) if (trp->endpoint) { - error(JSON_KEY_TRP_ENDPOINT " set already"); + netdata_log_error(JSON_KEY_TRP_ENDPOINT " set already"); goto exit; } trp->endpoint = strdupz(json_object_get_string(json_object_iter_peek_value(&it))); json_object_iter_next(&it); continue; } - - error ("unknown JSON key in dictionary (\"%s\")", json_object_iter_peek_name(&it)); + + netdata_log_error("unknown JSON key in dictionary (\"%s\")", json_object_iter_peek_name(&it)); json_object_iter_next(&it); } if (!trp->endpoint) { - error (JSON_KEY_TRP_ENDPOINT " is 
missing from JSON dictionary"); + netdata_log_error(JSON_KEY_TRP_ENDPOINT " is missing from JSON dictionary"); goto exit; } if (trp->type == ACLK_TRP_UNKNOWN) { - error ("transport type not set"); + netdata_log_error("transport type not set"); goto exit; } @@ -598,7 +598,7 @@ static int parse_json_env_transports(json_object *json_array, aclk_env_t *env) { json_object *obj; if (env->transports) { - error("transports have been set already"); + netdata_log_error("transports have been set already"); return 1; } @@ -610,7 +610,7 @@ static int parse_json_env_transports(json_object *json_array, aclk_env_t *env) { trp = callocz(1, sizeof(aclk_transport_desc_t)); obj = json_object_array_get_idx(json_array, i); if (parse_json_env_transport(obj, trp)) { - error("error parsing transport idx %d", (int)i); + netdata_log_error("error parsing transport idx %d", (int)i); freez(trp); return 1; } @@ -626,14 +626,14 @@ static int parse_json_env_transports(json_object *json_array, aclk_env_t *env) { static int parse_json_backoff_int(struct json_object_iterator *it, int *out, const char* name, int min, int max) { if (!strcmp(json_object_iter_peek_name(it), name)) { if (json_object_get_type(json_object_iter_peek_value(it)) != json_type_int) { - error("Could not parse \"%s\". Not an integer as expected.", name); + netdata_log_error("Could not parse \"%s\". Not an integer as expected.", name); return MATCHED_ERROR; } *out = json_object_get_int(json_object_iter_peek_value(it)); if (*out < min || *out > max) { - error("Value of \"%s\"=%d out of range (%d-%d).", name, *out, min, max); + netdata_log_error("Value of \"%s\"=%d out of range (%d-%d).", name, *out, min, max); return MATCHED_ERROR; } @@ -675,7 +675,7 @@ static int parse_json_backoff(json_object *json, aclk_backoff_t *backoff) { continue; } - error ("unknown JSON key in dictionary (\"%s\")", json_object_iter_peek_name(&it)); + netdata_log_error("unknown JSON key in dictionary (\"%s\")", json_object_iter_peek_name(&it)); json_object_iter_next(&it); } @@ -687,7 +687,7 @@ static int parse_json_env_caps(json_object *json, aclk_env_t *env) { const char *str; if (env->capabilities) { - error("transports have been set already"); + netdata_log_error("transports have been set already"); return 1; } @@ -702,12 +702,12 @@ static int parse_json_env_caps(json_object *json, aclk_env_t *env) { for (size_t i = 0; i < env->capability_count; i++) { obj = json_object_array_get_idx(json, i); if (json_object_get_type(obj) != json_type_string) { - error("Capability at index %d not a string!", (int)i); + netdata_log_error("Capability at index %d not a string!", (int)i); return 1; } str = json_object_get_string(obj); if (!str) { - error("Error parsing capabilities"); + netdata_log_error("Error parsing capabilities"); return 1; } env->capabilities[i] = strdupz(str); @@ -723,7 +723,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http response of /env endpoint"); + netdata_log_error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -734,7 +734,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { if (!strcmp(json_object_iter_peek_name(&it), JSON_KEY_AUTH_ENDPOINT)) { PARSE_ENV_JSON_CHK_TYPE(&it, json_type_string, JSON_KEY_AUTH_ENDPOINT) if (env->auth_endpoint) { - error("authEndpoint set already"); + netdata_log_error("authEndpoint set already"); goto exit; } env->auth_endpoint = 
strdupz(json_object_get_string(json_object_iter_peek_value(&it))); @@ -745,7 +745,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { if (!strcmp(json_object_iter_peek_name(&it), JSON_KEY_ENC)) { PARSE_ENV_JSON_CHK_TYPE(&it, json_type_string, JSON_KEY_ENC) if (env->encoding != ACLK_ENC_UNKNOWN) { - error(JSON_KEY_ENC " set already"); + netdata_log_error(JSON_KEY_ENC " set already"); goto exit; } env->encoding = aclk_encoding_type_t_from_str(json_object_get_string(json_object_iter_peek_value(&it))); @@ -768,7 +768,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { if (parse_json_backoff(json_object_iter_peek_value(&it), &env->backoff)) { env->backoff.base = 0; - error("Error parsing Backoff parameters in env"); + netdata_log_error("Error parsing Backoff parameters in env"); goto exit; } @@ -780,7 +780,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { PARSE_ENV_JSON_CHK_TYPE(&it, json_type_array, JSON_KEY_CAPS) if (parse_json_env_caps(json_object_iter_peek_value(&it), env)) { - error("Error parsing capabilities list"); + netdata_log_error("Error parsing capabilities list"); goto exit; } @@ -788,25 +788,25 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { continue; } - error ("unknown JSON key in dictionary (\"%s\")", json_object_iter_peek_name(&it)); + netdata_log_error("unknown JSON key in dictionary (\"%s\")", json_object_iter_peek_name(&it)); json_object_iter_next(&it); } // Check all compulsory keys have been set if (env->transport_count < 1) { - error("env has to return at least one transport"); + netdata_log_error("env has to return at least one transport"); goto exit; } if (!env->auth_endpoint) { - error(JSON_KEY_AUTH_ENDPOINT " is compulsory"); + netdata_log_error(JSON_KEY_AUTH_ENDPOINT " is compulsory"); goto exit; } if (env->encoding == ACLK_ENC_UNKNOWN) { - error(JSON_KEY_ENC " is compulsory"); + netdata_log_error(JSON_KEY_ENC " is compulsory"); goto exit; } if (!env->backoff.base) { - error(JSON_KEY_BACKOFF " is compulsory"); + netdata_log_error(JSON_KEY_BACKOFF " is compulsory"); goto exit; } @@ -830,7 +830,7 @@ int aclk_get_env(aclk_env_t *env, const char* aclk_hostname, int aclk_port) { char *agent_id = get_agent_claimid(); if (agent_id == NULL) { - error("Agent was not claimed - cannot perform challenge/response"); + netdata_log_error("Agent was not claimed - cannot perform challenge/response"); buffer_free(buf); return 1; } @@ -843,13 +843,13 @@ int aclk_get_env(aclk_env_t *env, const char* aclk_hostname, int aclk_port) { req.port = aclk_port; req.url = buf->buffer; if (aclk_https_request(&req, &resp)) { - error("Error trying to contact env endpoint"); + netdata_log_error("Error trying to contact env endpoint"); https_req_response_free(&resp); buffer_free(buf); return 2; } if (resp.http_code != 200) { - error("The HTTP code not 200 OK (Got %d)", resp.http_code); + netdata_log_error("The HTTP code not 200 OK (Got %d)", resp.http_code); if (resp.payload_size) aclk_parse_otp_error(resp.payload); https_req_response_free(&resp); @@ -858,14 +858,14 @@ int aclk_get_env(aclk_env_t *env, const char* aclk_hostname, int aclk_port) { } if (!resp.payload || !resp.payload_size) { - error("Unexpected empty payload as response to /env call"); + netdata_log_error("Unexpected empty payload as response to /env call"); https_req_response_free(&resp); buffer_free(buf); return 4; } if (parse_json_env(resp.payload, env)) { - error ("error parsing /env message"); + netdata_log_error("error parsing /env message"); 
https_req_response_free(&resp); buffer_free(buf); return 5; diff --git a/aclk/aclk_proxy.c b/aclk/aclk_proxy.c index 1701eb8e85..4af46208f7 100644 --- a/aclk/aclk_proxy.c +++ b/aclk/aclk_proxy.c @@ -85,7 +85,7 @@ static inline void safe_log_proxy_error(char *str, const char *proxy) { char *log = strdupz(proxy); safe_log_proxy_censor(log); - error("%s Provided Value:\"%s\"", str, log); + netdata_log_error("%s Provided Value:\"%s\"", str, log); freez(log); } diff --git a/aclk/aclk_query.c b/aclk/aclk_query.c index c640d0b701..a63bf3dc1b 100644 --- a/aclk/aclk_query.c +++ b/aclk/aclk_query.c @@ -164,7 +164,7 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) w->response.zinitialized = true; w->response.zoutput = true; } else - error("Failed to initialize zlib. Proceeding without compression."); + netdata_log_error("Failed to initialize zlib. Proceeding without compression."); } } @@ -177,9 +177,9 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query) z_ret = deflate(&w->response.zstream, Z_FINISH); if(z_ret < 0) { if(w->response.zstream.msg) - error("Error compressing body. ZLIB error: \"%s\"", w->response.zstream.msg); + netdata_log_error("Error compressing body. ZLIB error: \"%s\"", w->response.zstream.msg); else - error("Unknown error during zlib compression."); + netdata_log_error("Unknown error during zlib compression."); retval = 1; w->response.code = 500; aclk_http_msg_v2_err(query_thr->client, query->callback_topic, query->msg_id, w->response.code, CLOUD_EC_ZLIB_ERROR, CLOUD_EMSG_ZLIB_ERROR, NULL, 0); @@ -366,7 +366,7 @@ void aclk_query_threads_start(struct aclk_query_threads *query_threads, mqtt_wss query_threads->thread_list[i].client = client; if(unlikely(snprintfz(thread_name, TASK_LEN_MAX, "ACLK_QRY[%d]", i) < 0)) - error("snprintf encoding error"); + netdata_log_error("snprintf encoding error"); netdata_thread_create( &query_threads->thread_list[i].thread, thread_name, NETDATA_THREAD_OPTION_JOINABLE, aclk_query_main_thread, &query_threads->thread_list[i]); diff --git a/aclk/aclk_query_queue.c b/aclk/aclk_query_queue.c index 78a906d962..8ca21d4563 100644 --- a/aclk/aclk_query_queue.c +++ b/aclk/aclk_query_queue.c @@ -27,7 +27,7 @@ static inline int _aclk_queue_query(aclk_query_t query) if (aclk_query_queue.block_push) { ACLK_QUEUE_UNLOCK; if(service_running(SERVICE_ACLK | ABILITY_DATA_QUERIES)) - error("Query Queue is blocked from accepting new requests. This is normally the case when ACLK prepares to shutdown."); + netdata_log_error("Query Queue is blocked from accepting new requests. This is normally the case when ACLK prepares to shutdown."); aclk_query_free(query); return 1; } @@ -67,7 +67,7 @@ aclk_query_t aclk_queue_pop(void) if (aclk_query_queue.block_push) { ACLK_QUEUE_UNLOCK; if(service_running(SERVICE_ACLK | ABILITY_DATA_QUERIES)) - error("POP Query Queue is blocked from accepting new requests. This is normally the case when ACLK prepares to shutdown."); + netdata_log_error("POP Query Queue is blocked from accepting new requests. 
This is normally the case when ACLK prepares to shutdown."); return NULL; } diff --git a/aclk/aclk_query_queue.h b/aclk/aclk_query_queue.h index 944fc0797a..5983561a68 100644 --- a/aclk/aclk_query_queue.h +++ b/aclk/aclk_query_queue.h @@ -79,7 +79,7 @@ void aclk_queue_unlock(void); if (likely(query->data.bin_payload.payload)) { \ aclk_queue_query(query); \ } else { \ - error("Failed to generate payload (%s)", __FUNCTION__); \ + netdata_log_error("Failed to generate payload (%s)", __FUNCTION__); \ aclk_query_free(query); \ } diff --git a/aclk/aclk_rx_msgs.c b/aclk/aclk_rx_msgs.c index 96c70d7571..82b6252964 100644 --- a/aclk/aclk_rx_msgs.c +++ b/aclk/aclk_rx_msgs.c @@ -103,14 +103,14 @@ static inline int aclk_v2_payload_get_query(const char *payload, char **query_ur // TODO better check of URL if(strncmp(payload, ACLK_CLOUD_REQ_V2_PREFIX, strlen(ACLK_CLOUD_REQ_V2_PREFIX))) { errno = 0; - error("Only accepting requests that start with \"%s\" from CLOUD.", ACLK_CLOUD_REQ_V2_PREFIX); + netdata_log_error("Only accepting requests that start with \"%s\" from CLOUD.", ACLK_CLOUD_REQ_V2_PREFIX); return 1; } start = payload + 4; if(!(end = strstr(payload, " HTTP/1.1\x0D\x0A"))) { errno = 0; - error("Doesn't look like HTTP GET request."); + netdata_log_error("Doesn't look like HTTP GET request."); return 1; } @@ -126,7 +126,7 @@ static int aclk_handle_cloud_http_request_v2(struct aclk_request *cloud_to_agent errno = 0; if (cloud_to_agent->version < ACLK_V_COMPRESSION) { - error( + netdata_log_error( "This handler cannot reply to request with version older than %d, received %d.", ACLK_V_COMPRESSION, cloud_to_agent->version); @@ -136,22 +136,22 @@ static int aclk_handle_cloud_http_request_v2(struct aclk_request *cloud_to_agent query = aclk_query_new(HTTP_API_V2); if (unlikely(aclk_extract_v2_data(raw_payload, &query->data.http_api_v2.payload))) { - error("Error extracting payload expected after the JSON dictionary."); + netdata_log_error("Error extracting payload expected after the JSON dictionary."); goto error; } if (unlikely(aclk_v2_payload_get_query(query->data.http_api_v2.payload, &query->dedup_id))) { - error("Could not extract payload from query"); + netdata_log_error("Could not extract payload from query"); goto error; } if (unlikely(!cloud_to_agent->callback_topic)) { - error("Missing callback_topic"); + netdata_log_error("Missing callback_topic"); goto error; } if (unlikely(!cloud_to_agent->msg_id)) { - error("Missing msg_id"); + netdata_log_error("Missing msg_id"); goto error; } @@ -254,13 +254,13 @@ int create_node_instance_result(const char *msg, size_t msg_len) uuid_t host_id, node_id; if (uuid_parse(res.machine_guid, host_id)) { - error("Error parsing machine_guid provided by CreateNodeInstanceResult"); + netdata_log_error("Error parsing machine_guid provided by CreateNodeInstanceResult"); freez(res.machine_guid); freez(res.node_id); return 1; } if (uuid_parse(res.node_id, node_id)) { - error("Error parsing node_id provided by CreateNodeInstanceResult"); + netdata_log_error("Error parsing node_id provided by CreateNodeInstanceResult"); freez(res.machine_guid); freez(res.node_id); return 1; @@ -341,7 +341,7 @@ int start_alarm_streaming(const char *msg, size_t msg_len) { struct start_alarm_streaming res = parse_start_alarm_streaming(msg, msg_len); if (!res.node_id) { - error("Error parsing StartAlarmStreaming"); + netdata_log_error("Error parsing StartAlarmStreaming"); return 1; } aclk_start_alert_streaming(res.node_id, res.resets); @@ -353,7 +353,7 @@ int send_alarm_checkpoint(const char 
*msg, size_t msg_len) { struct send_alarm_checkpoint sac = parse_send_alarm_checkpoint(msg, msg_len); if (!sac.node_id || !sac.claim_id) { - error("Error parsing SendAlarmCheckpoint"); + netdata_log_error("Error parsing SendAlarmCheckpoint"); freez(sac.node_id); freez(sac.claim_id); return 1; @@ -368,7 +368,7 @@ int send_alarm_configuration(const char *msg, size_t msg_len) { char *config_hash = parse_send_alarm_configuration(msg, msg_len); if (!config_hash || !*config_hash) { - error("Error parsing SendAlarmConfiguration"); + netdata_log_error("Error parsing SendAlarmConfiguration"); freez(config_hash); return 1; } @@ -381,7 +381,7 @@ int send_alarm_snapshot(const char *msg, size_t msg_len) { struct send_alarm_snapshot *sas = parse_send_alarm_snapshot(msg, msg_len); if (!sas->node_id || !sas->claim_id || !sas->snapshot_uuid) { - error("Error parsing SendAlarmSnapshot"); + netdata_log_error("Error parsing SendAlarmSnapshot"); destroy_send_alarm_snapshot(sas); return 1; } @@ -396,7 +396,7 @@ int handle_disconnect_req(const char *msg, size_t msg_len) if (!cmd) return 1; if (cmd->permaban) { - error("Cloud Banned This Agent!"); + netdata_log_error("Cloud Banned This Agent!"); aclk_disable_runtime = 1; } netdata_log_info("Cloud requested disconnect (EC=%u, \"%s\")", (unsigned int)cmd->error_code, cmd->error_description); @@ -531,7 +531,7 @@ void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t new_cloud_rx_msg_t *msg_descriptor = find_rx_handler_by_hash(simple_hash(message_type)); debug(D_ACLK, "Got message named '%s' from cloud", message_type); if (unlikely(!msg_descriptor)) { - error("Do not know how to handle message of type '%s'. Ignoring", message_type); + netdata_log_error("Do not know how to handle message of type '%s'. Ignoring", message_type); if (aclk_stats_enabled) { ACLK_STATS_LOCK; aclk_metrics_per_sample.cloud_req_err++; @@ -557,7 +557,7 @@ void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t ACLK_STATS_UNLOCK; } if (msg_descriptor->fnc(msg, msg_len)) { - error("Error processing message of type '%s'", message_type); + netdata_log_error("Error processing message of type '%s'", message_type); if (aclk_stats_enabled) { ACLK_STATS_LOCK; aclk_metrics_per_sample.cloud_req_err++; diff --git a/aclk/aclk_stats.c b/aclk/aclk_stats.c index 2b4d5e48ab..f4672882b7 100644 --- a/aclk/aclk_stats.c +++ b/aclk/aclk_stats.c @@ -193,7 +193,7 @@ static void aclk_stats_query_threads(uint32_t *queries_per_thread) for (int i = 0; i < aclk_stats_cfg.query_thread_count; i++) { if (snprintfz(dim_name, MAX_DIM_NAME, "Query %d", i) < 0) - error("snprintf encoding error"); + netdata_log_error("snprintf encoding error"); aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); } } @@ -463,7 +463,7 @@ void aclk_stats_msg_puback(uint16_t id) if (unlikely(!pub_time[id])) { ACLK_STATS_UNLOCK; - error("Received PUBACK for unknown message?!"); + netdata_log_error("Received PUBACK for unknown message?!"); return; } diff --git a/aclk/aclk_tx_msgs.c b/aclk/aclk_tx_msgs.c index d11e96cfba..26e20dfb2c 100644 --- a/aclk/aclk_tx_msgs.c +++ b/aclk/aclk_tx_msgs.c @@ -32,7 +32,7 @@ uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, s const char *topic = aclk_get_topic(subtopic); if (unlikely(!topic)) { - error("Couldn't get topic. Aborting message send."); + netdata_log_error("Couldn't get topic. 
Aborting message send."); return 0; } @@ -61,7 +61,7 @@ static int aclk_send_message_with_bin_payload(mqtt_wss_client client, json_objec int len; if (unlikely(!topic || topic[0] != '/')) { - error ("Full topic required!"); + netdata_log_error("Full topic required!"); json_object_put(msg); return HTTP_RESP_INTERNAL_SERVER_ERROR; } @@ -172,7 +172,7 @@ void aclk_http_msg_v2_err(mqtt_wss_client client, const char *topic, const char json_object_object_add(msg, "error-description", tmp); if (aclk_send_message_with_bin_payload(client, msg, topic, payload, payload_len)) { - error("Failed to send cancellation message for http reply %zu %s", payload_len, payload); + netdata_log_error("Failed to send cancellation message for http reply %zu %s", payload_len, payload); } } @@ -220,7 +220,7 @@ uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable rrdhost_aclk_state_lock(localhost); if (unlikely(!localhost->aclk_state.claimed_id)) { - error("Internal error. Should not come here if not claimed"); + netdata_log_error("Internal error. Should not come here if not claimed"); rrdhost_aclk_state_unlock(localhost); return 0; } @@ -233,7 +233,7 @@ uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable rrdhost_aclk_state_unlock(localhost); if (!msg) { - error("Error generating agent::v1::UpdateAgentConnection payload"); + netdata_log_error("Error generating agent::v1::UpdateAgentConnection payload"); return 0; } @@ -255,7 +255,7 @@ char *aclk_generate_lwt(size_t *size) { rrdhost_aclk_state_lock(localhost); if (unlikely(!localhost->aclk_state.claimed_id)) { - error("Internal error. Should not come here if not claimed"); + netdata_log_error("Internal error. Should not come here if not claimed"); rrdhost_aclk_state_unlock(localhost); return NULL; } @@ -265,7 +265,7 @@ char *aclk_generate_lwt(size_t *size) { rrdhost_aclk_state_unlock(localhost); if (!msg) - error("Error generating agent::v1::UpdateAgentConnection payload for LWT"); + netdata_log_error("Error generating agent::v1::UpdateAgentConnection payload for LWT"); return msg; } diff --git a/aclk/aclk_util.c b/aclk/aclk_util.c index 7d03f97fd1..da81664770 100644 --- a/aclk/aclk_util.c +++ b/aclk/aclk_util.c @@ -185,7 +185,7 @@ static void topic_generate_final(struct aclk_topic *t) { rrdhost_aclk_state_lock(localhost); if (unlikely(!localhost->aclk_state.claimed_id)) { - error("This should never be called if agent not claimed"); + netdata_log_error("This should never be called if agent not claimed"); rrdhost_aclk_state_unlock(localhost); return; } @@ -214,7 +214,7 @@ static int topic_cache_add_topic(struct json_object *json, struct aclk_topic *to while (!json_object_iter_equal(&it, &itEnd)) { if (!strcmp(json_object_iter_peek_name(&it), JSON_TOPIC_KEY_NAME)) { if (json_object_get_type(json_object_iter_peek_value(&it)) != json_type_string) { - error("topic dictionary key \"" JSON_TOPIC_KEY_NAME "\" is expected to be json_type_string"); + netdata_log_error("topic dictionary key \"" JSON_TOPIC_KEY_NAME "\" is expected to be json_type_string"); return 1; } topic->topic_id = topic_name_to_id(json_object_get_string(json_object_iter_peek_value(&it))); @@ -226,7 +226,7 @@ static int topic_cache_add_topic(struct json_object *json, struct aclk_topic *to } if (!strcmp(json_object_iter_peek_name(&it), JSON_TOPIC_KEY_TOPIC)) { if (json_object_get_type(json_object_iter_peek_value(&it)) != json_type_string) { - error("topic dictionary key \"" JSON_TOPIC_KEY_TOPIC "\" is expected to be json_type_string"); + 
netdata_log_error("topic dictionary key \"" JSON_TOPIC_KEY_TOPIC "\" is expected to be json_type_string"); return 1; } topic->topic_recvd = strdupz(json_object_get_string(json_object_iter_peek_value(&it))); @@ -234,12 +234,12 @@ static int topic_cache_add_topic(struct json_object *json, struct aclk_topic *to continue; } - error("topic dictionary has Unknown/Unexpected key \"%s\" in topic description. Ignoring!", json_object_iter_peek_name(&it)); + netdata_log_error("topic dictionary has Unknown/Unexpected key \"%s\" in topic description. Ignoring!", json_object_iter_peek_name(&it)); json_object_iter_next(&it); } if (!topic->topic_recvd) { - error("topic dictionary Missig compulsory key %s", JSON_TOPIC_KEY_TOPIC); + netdata_log_error("topic dictionary Missig compulsory key %s", JSON_TOPIC_KEY_TOPIC); return 1; } @@ -255,7 +255,7 @@ int aclk_generate_topic_cache(struct json_object *json) size_t array_size = json_object_array_length(json); if (!array_size) { - error("Empty topic list!"); + netdata_log_error("Empty topic list!"); return 1; } @@ -267,19 +267,19 @@ int aclk_generate_topic_cache(struct json_object *json) for (size_t i = 0; i < array_size; i++) { obj = json_object_array_get_idx(json, i); if (json_object_get_type(obj) != json_type_object) { - error("expected json_type_object"); + netdata_log_error("expected json_type_object"); return 1; } aclk_topic_cache[i] = callocz(1, sizeof(struct aclk_topic)); if (topic_cache_add_topic(obj, aclk_topic_cache[i])) { - error("failed to parse topic @idx=%d", (int)i); + netdata_log_error("failed to parse topic @idx=%d", (int)i); return 1; } } for (int i = 0; compulsory_topics[i] != ACLK_TOPICID_UNKNOWN; i++) { if (!aclk_get_topic(compulsory_topics[i])) { - error("missing compulsory topic \"%s\" in password response from cloud", topic_id_to_name(compulsory_topics[i])); + netdata_log_error("missing compulsory topic \"%s\" in password response from cloud", topic_id_to_name(compulsory_topics[i])); return 1; } } @@ -295,7 +295,7 @@ int aclk_generate_topic_cache(struct json_object *json) const char *aclk_get_topic(enum aclk_topics topic) { if (!aclk_topic_cache) { - error("Topic cache not initialized"); + netdata_log_error("Topic cache not initialized"); return NULL; } @@ -303,7 +303,7 @@ const char *aclk_get_topic(enum aclk_topics topic) if (aclk_topic_cache[i]->topic_id == topic) return aclk_topic_cache[i]->topic; } - error("Unknown topic"); + netdata_log_error("Unknown topic"); return NULL; } @@ -315,7 +315,7 @@ const char *aclk_get_topic(enum aclk_topics topic) const char *aclk_topic_cache_iterate(aclk_topic_cache_iter_t *iter) { if (!aclk_topic_cache) { - error("Topic cache not initialized when %s was called.", __FUNCTION__); + netdata_log_error("Topic cache not initialized when %s was called.", __FUNCTION__); return NULL; } diff --git a/aclk/https_client.c b/aclk/https_client.c index 9df935da49..62f99aab62 100644 --- a/aclk/https_client.c +++ b/aclk/https_client.c @@ -70,17 +70,17 @@ static int parse_http_hdr(rbuf_t buf, http_parse_ctx *parse_ctx) char buf_val[HTTP_HDR_BUFFER_SIZE]; char *ptr = buf_key; if (!rbuf_find_bytes(buf, HTTP_LINE_TERM, strlen(HTTP_LINE_TERM), &idx_end)) { - error("CRLF expected"); + netdata_log_error("CRLF expected"); return 1; } char *separator = rbuf_find_bytes(buf, HTTP_KEYVAL_SEPARATOR, strlen(HTTP_KEYVAL_SEPARATOR), &idx); if (!separator) { - error("Missing Key/Value separator"); + netdata_log_error("Missing Key/Value separator"); return 1; } if (idx >= HTTP_HDR_BUFFER_SIZE) { - error("Key name is too long"); + 
netdata_log_error("Key name is too long"); return 1; } @@ -90,7 +90,7 @@ static int parse_http_hdr(rbuf_t buf, http_parse_ctx *parse_ctx) rbuf_bump_tail(buf, strlen(HTTP_KEYVAL_SEPARATOR)); idx_end -= strlen(HTTP_KEYVAL_SEPARATOR) + idx; if (idx_end >= HTTP_HDR_BUFFER_SIZE) { - error("Value of key \"%s\" too long", buf_key); + netdata_log_error("Value of key \"%s\" too long", buf_key); return 1; } @@ -116,22 +116,22 @@ static int parse_http_response(rbuf_t buf, http_parse_ctx *parse_ctx) switch (parse_ctx->state) { case HTTP_PARSE_INITIAL: if (rbuf_memcmp_n(buf, RESP_PROTO, strlen(RESP_PROTO))) { - error("Expected response to start with \"%s\"", RESP_PROTO); + netdata_log_error("Expected response to start with \"%s\"", RESP_PROTO); return PARSE_ERROR; } rbuf_bump_tail(buf, strlen(RESP_PROTO)); if (rbuf_pop(buf, rc, 4) != 4) { - error("Expected HTTP status code"); + netdata_log_error("Expected HTTP status code"); return PARSE_ERROR; } if (rc[3] != ' ') { - error("Expected space after HTTP return code"); + netdata_log_error("Expected space after HTTP return code"); return PARSE_ERROR; } rc[3] = 0; parse_ctx->http_code = atoi(rc); if (parse_ctx->http_code < 100 || parse_ctx->http_code >= 600) { - error("HTTP code not in range 100 to 599"); + netdata_log_error("HTTP code not in range 100 to 599"); return PARSE_ERROR; } @@ -186,7 +186,7 @@ typedef struct https_req_ctx { static int https_req_check_timedout(https_req_ctx_t *ctx) { if (now_realtime_sec() > ctx->req_start_time + ctx->request->timeout_s) { - error("request timed out"); + netdata_log_error("request timed out"); return 1; } return 0; @@ -220,12 +220,12 @@ static int socket_write_all(https_req_ctx_t *ctx, char *data, size_t data_len) { do { int ret = poll(&ctx->poll_fd, 1, POLL_TO_MS); if (ret < 0) { - error("poll error"); + netdata_log_error("poll error"); return 1; } if (ret == 0) { if (https_req_check_timedout(ctx)) { - error("Poll timed out"); + netdata_log_error("Poll timed out"); return 2; } continue; @@ -235,7 +235,7 @@ static int socket_write_all(https_req_ctx_t *ctx, char *data, size_t data_len) { if (ret > 0) { ctx->written += ret; } else if (errno != EAGAIN && errno != EWOULDBLOCK) { - error("Error writing to socket"); + netdata_log_error("Error writing to socket"); return 3; } } while (ctx->written < data_len); @@ -250,12 +250,12 @@ static int ssl_write_all(https_req_ctx_t *ctx, char *data, size_t data_len) { do { int ret = poll(&ctx->poll_fd, 1, POLL_TO_MS); if (ret < 0) { - error("poll error"); + netdata_log_error("poll error"); return 1; } if (ret == 0) { if (https_req_check_timedout(ctx)) { - error("Poll timed out"); + netdata_log_error("Poll timed out"); return 2; } continue; @@ -275,7 +275,7 @@ static int ssl_write_all(https_req_ctx_t *ctx, char *data, size_t data_len) { ctx->poll_fd.events |= POLLOUT; break; default: - error("SSL_write Err: %s", _ssl_err_tos(ret)); + netdata_log_error("SSL_write Err: %s", _ssl_err_tos(ret)); return 3; } } @@ -299,12 +299,12 @@ static int read_parse_response(https_req_ctx_t *ctx) { do { ret = poll(&ctx->poll_fd, 1, POLL_TO_MS); if (ret < 0) { - error("poll error"); + netdata_log_error("poll error"); return 1; } if (ret == 0) { if (https_req_check_timedout(ctx)) { - error("Poll timed out"); + netdata_log_error("Poll timed out"); return 2; } if (!ctx->ssl_ctx) @@ -332,12 +332,12 @@ static int read_parse_response(https_req_ctx_t *ctx) { ctx->poll_fd.events |= POLLOUT; break; default: - error("SSL_read Err: %s", _ssl_err_tos(ret)); + netdata_log_error("SSL_read Err: %s", 
_ssl_err_tos(ret)); return 3; } } else { if (errno != EAGAIN && errno != EWOULDBLOCK) { - error("write error"); + netdata_log_error("write error"); return 3; } ctx->poll_fd.events |= POLLIN; @@ -346,7 +346,7 @@ static int read_parse_response(https_req_ctx_t *ctx) { } while (!(ret = parse_http_response(ctx->buf_rx, &ctx->parse_ctx))); if (ret != PARSE_SUCCESS) { - error("Error parsing HTTP response"); + netdata_log_error("Error parsing HTTP response"); return 1; } @@ -373,7 +373,7 @@ static int handle_http_request(https_req_ctx_t *ctx) { buffer_strcat(hdr, "POST "); break; default: - error("Unknown HTTPS request type!"); + netdata_log_error("Unknown HTTPS request type!"); rc = 1; goto err_exit; } @@ -419,14 +419,14 @@ static int handle_http_request(https_req_ctx_t *ctx) { // Send the request if (https_client_write_all(ctx, hdr->buffer, hdr->len)) { - error("Couldn't write HTTP request header into SSL connection"); + netdata_log_error("Couldn't write HTTP request header into SSL connection"); rc = 2; goto err_exit; } if (ctx->request->request_type == HTTP_REQ_POST && ctx->request->payload && ctx->request->payload_size) { if (https_client_write_all(ctx, ctx->request->payload, ctx->request->payload_size)) { - error("Couldn't write payload into SSL connection"); + netdata_log_error("Couldn't write payload into SSL connection"); rc = 3; goto err_exit; } @@ -434,7 +434,7 @@ static int handle_http_request(https_req_ctx_t *ctx) { // Read The Response if (read_parse_response(ctx)) { - error("Error reading or parsing response from server"); + netdata_log_error("Error reading or parsing response from server"); rc = 4; goto err_exit; } @@ -456,7 +456,7 @@ static int cert_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) err_cert = X509_STORE_CTX_get_current_cert(ctx); err_str = X509_NAME_oneline(X509_get_subject_name(err_cert), NULL, 0); - error("Cert Chain verify error:num=%d:%s:depth=%d:%s", err, + netdata_log_error("Cert Chain verify error:num=%d:%s:depth=%d:%s", err, X509_verify_cert_error_string(err), depth, err_str); free(err_str); @@ -466,7 +466,7 @@ static int cert_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) if (!preverify_ok && err == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) { preverify_ok = 1; - error("Self Signed Certificate Accepted as the agent was built with ACLK_SSL_ALLOW_SELF_SIGNED"); + netdata_log_error("Self Signed Certificate Accepted as the agent was built with ACLK_SSL_ALLOW_SELF_SIGNED"); } #endif @@ -486,7 +486,7 @@ int https_request(https_req_t *request, https_req_response_t *response) { ctx->buf_rx = rbuf_create(RX_BUFFER_SIZE); if (!ctx->buf_rx) { - error("Couldn't allocate buffer for RX data"); + netdata_log_error("Couldn't allocate buffer for RX data"); goto exit_req_ctx; } @@ -494,12 +494,12 @@ int https_request(https_req_t *request, https_req_response_t *response) { ctx->sock = connect_to_this_ip46(IPPROTO_TCP, SOCK_STREAM, connect_host, 0, connect_port_str, &timeout); if (ctx->sock < 0) { - error("Error connecting TCP socket to \"%s\"", connect_host); + netdata_log_error("Error connecting TCP socket to \"%s\"", connect_host); goto exit_buf_rx; } if (fcntl(ctx->sock, F_SETFL, fcntl(ctx->sock, F_GETFL, 0) | O_NONBLOCK) == -1) { - error("Error setting O_NONBLOCK to TCP socket."); + netdata_log_error("Error setting O_NONBLOCK to TCP socket."); goto exit_sock; } @@ -517,11 +517,11 @@ int https_request(https_req_t *request, https_req_response_t *response) { req.proxy_password = request->proxy_password; ctx->request = &req; if (handle_http_request(ctx)) { - 
error("Failed to CONNECT with proxy"); + netdata_log_error("Failed to CONNECT with proxy"); goto exit_sock; } if (ctx->parse_ctx.http_code != 200) { - error("Proxy didn't return 200 OK (got %d)", ctx->parse_ctx.http_code); + netdata_log_error("Proxy didn't return 200 OK (got %d)", ctx->parse_ctx.http_code); goto exit_sock; } netdata_log_info("Proxy accepted CONNECT upgrade"); @@ -530,26 +530,26 @@ int https_request(https_req_t *request, https_req_response_t *response) { ctx->ssl_ctx = netdata_ssl_create_client_ctx(0); if (ctx->ssl_ctx==NULL) { - error("Cannot allocate SSL context"); + netdata_log_error("Cannot allocate SSL context"); goto exit_sock; } if (!SSL_CTX_set_default_verify_paths(ctx->ssl_ctx)) { - error("Error setting default verify paths"); + netdata_log_error("Error setting default verify paths"); goto exit_CTX; } SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, cert_verify_callback); ctx->ssl = SSL_new(ctx->ssl_ctx); if (ctx->ssl==NULL) { - error("Cannot allocate SSL"); + netdata_log_error("Cannot allocate SSL"); goto exit_CTX; } SSL_set_fd(ctx->ssl, ctx->sock); ret = SSL_connect(ctx->ssl); if (ret != -1 && ret != 1) { - error("SSL could not connect"); + netdata_log_error("SSL could not connect"); goto exit_SSL; } if (ret == -1) { @@ -557,14 +557,14 @@ int https_request(https_req_t *request, https_req_response_t *response) { // consult SSL_connect documentation for details int ec = SSL_get_error(ctx->ssl, ret); if (ec != SSL_ERROR_WANT_READ && ec != SSL_ERROR_WANT_WRITE) { - error("Failed to start SSL connection"); + netdata_log_error("Failed to start SSL connection"); goto exit_SSL; } } // The actual request here if (handle_http_request(ctx)) { - error("Couldn't process request"); + netdata_log_error("Couldn't process request"); goto exit_SSL; } response->http_code = ctx->parse_ctx.http_code; @@ -573,7 +573,7 @@ int https_request(https_req_t *request, https_req_response_t *response) { response->payload = mallocz(response->payload_size + 1); ret = rbuf_pop(ctx->buf_rx, response->payload, response->payload_size); if (ret != (int)response->payload_size) { - error("Payload size doesn't match remaining data on the buffer!"); + netdata_log_error("Payload size doesn't match remaining data on the buffer!"); response->payload_size = ret; } // normally we take payload as it is and copy it @@ -627,16 +627,16 @@ static int parse_host_port(url_t *url) { if (ptr) { size_t port_len = strlen(ptr + 1); if (!port_len) { - error(URL_PARSER_LOG_PREFIX ": specified but no port number"); + netdata_log_error(URL_PARSER_LOG_PREFIX ": specified but no port number"); return 1; } if (port_len > 5 /* MAX port length is 5digit long in decimal */) { - error(URL_PARSER_LOG_PREFIX "port # is too long"); + netdata_log_error(URL_PARSER_LOG_PREFIX "port # is too long"); return 1; } *ptr = 0; if (!strlen(url->host)) { - error(URL_PARSER_LOG_PREFIX "host empty after removing port"); + netdata_log_error(URL_PARSER_LOG_PREFIX "host empty after removing port"); return 1; } url->port = atoi (ptr + 1); @@ -672,7 +672,7 @@ int url_parse(const char *url, url_t *parsed) { if (end) { if (end == start) { - error (URL_PARSER_LOG_PREFIX "found " URI_PROTO_SEPARATOR " without protocol specified"); + netdata_log_error(URL_PARSER_LOG_PREFIX "found " URI_PROTO_SEPARATOR " without protocol specified"); return 1; } @@ -685,7 +685,7 @@ int url_parse(const char *url, url_t *parsed) { end = start + strlen(start); if (start == end) { - error(URL_PARSER_LOG_PREFIX "Host empty"); + 
netdata_log_error(URL_PARSER_LOG_PREFIX "Host empty"); return 1; } diff --git a/aclk/schema-wrappers/alarm_stream.cc b/aclk/schema-wrappers/alarm_stream.cc index 11b9284f5a..d1079a6885 100644 --- a/aclk/schema-wrappers/alarm_stream.cc +++ b/aclk/schema-wrappers/alarm_stream.cc @@ -59,7 +59,7 @@ static alarms::v1::AlarmStatus aclk_alarm_status_to_proto(enum aclk_alarm_status case aclk_alarm_status::ALARM_STATUS_CRITICAL: return alarms::v1::ALARM_STATUS_CRITICAL; default: - error("Unknown alarm status"); + netdata_log_error("Unknown alarm status"); return alarms::v1::ALARM_STATUS_UNKNOWN; } } diff --git a/claim/claim.c b/claim/claim.c index 1f61fdee28..4e4161b2d5 100644 --- a/claim/claim.c +++ b/claim/claim.c @@ -50,7 +50,7 @@ extern struct registry registry; CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, const char **msg) { if (!force || !netdata_cloud_enabled) { - error("Refusing to claim agent -> cloud functionality has been disabled"); + netdata_log_error("Refusing to claim agent -> cloud functionality has been disabled"); return CLAIM_AGENT_CLOUD_DISABLED; } @@ -88,7 +88,7 @@ CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, con netdata_log_info("Executing agent claiming command 'netdata-claim.sh'"); fp_child_output = netdata_popen(command_buffer, &command_pid, &fp_child_input); if(!fp_child_output) { - error("Cannot popen(\"%s\").", command_buffer); + netdata_log_error("Cannot popen(\"%s\").", command_buffer); return CLAIM_AGENT_CANNOT_EXECUTE_CLAIM_SCRIPT; } netdata_log_info("Waiting for claiming command to finish."); @@ -100,19 +100,19 @@ CLAIM_AGENT_RESPONSE claim_agent(const char *claiming_arguments, bool force, con return CLAIM_AGENT_OK; } if (exit_code < 0) { - error("Agent claiming command failed to complete its run."); + netdata_log_error("Agent claiming command failed to complete its run."); return CLAIM_AGENT_CLAIM_SCRIPT_FAILED; } errno = 0; unsigned maximum_known_exit_code = sizeof(claiming_errors) / sizeof(claiming_errors[0]) - 1; if ((unsigned)exit_code > maximum_known_exit_code) { - error("Agent failed to be claimed with an unknown error."); + netdata_log_error("Agent failed to be claimed with an unknown error."); return CLAIM_AGENT_CLAIM_SCRIPT_RETURNED_INVALID_CODE; } - error("Agent failed to be claimed with the following error message:"); - error("\"%s\"", claiming_errors[exit_code]); + netdata_log_error("Agent failed to be claimed with the following error message:"); + netdata_log_error("\"%s\"", claiming_errors[exit_code]); if(msg) *msg = claiming_errors[exit_code]; @@ -167,7 +167,7 @@ void load_claiming_state(void) long bytes_read; char *claimed_id = read_by_filename(filename, &bytes_read); if(claimed_id && uuid_parse(claimed_id, uuid)) { - error("claimed_id \"%s\" doesn't look like valid UUID", claimed_id); + netdata_log_error("claimed_id \"%s\" doesn't look like valid UUID", claimed_id); freez(claimed_id); claimed_id = NULL; } @@ -250,12 +250,12 @@ bool netdata_random_session_id_generate(void) { // save it int fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC, 640); if(fd == -1) { - error("Cannot create random session id file '%s'.", filename); + netdata_log_error("Cannot create random session id file '%s'.", filename); ret = false; } if(write(fd, guid, UUID_STR_LEN - 1) != UUID_STR_LEN - 1) { - error("Cannot write the random session id file '%s'.", filename); + netdata_log_error("Cannot write the random session id file '%s'.", filename); ret = false; } diff --git a/cli/cli.c b/cli/cli.c index 
c32a88f79a..288173b1e5 100644 --- a/cli/cli.c +++ b/cli/cli.c @@ -32,7 +32,7 @@ void *callocz_int(size_t nmemb, size_t size, const char *file __maybe_unused, co { void *p = calloc(nmemb, size); if (unlikely(!p)) { - error("Cannot allocate %zu bytes of memory.", nmemb * size); + netdata_log_error("Cannot allocate %zu bytes of memory.", nmemb * size); exit(1); } return p; @@ -42,7 +42,7 @@ void *mallocz_int(size_t size, const char *file __maybe_unused, const char *func { void *p = malloc(size); if (unlikely(!p)) { - error("Cannot allocate %zu bytes of memory.", size); + netdata_log_error("Cannot allocate %zu bytes of memory.", size); exit(1); } return p; @@ -52,7 +52,7 @@ void *reallocz_int(void *ptr, size_t size, const char *file __maybe_unused, cons { void *p = realloc(ptr, size); if (unlikely(!p)) { - error("Cannot allocate %zu bytes of memory.", size); + netdata_log_error("Cannot allocate %zu bytes of memory.", size); exit(1); } return p; @@ -70,7 +70,7 @@ void freez(void *ptr) { void *mallocz(size_t size) { void *p = malloc(size); if (unlikely(!p)) { - error("Cannot allocate %zu bytes of memory.", size); + netdata_log_error("Cannot allocate %zu bytes of memory.", size); exit(1); } return p; @@ -79,7 +79,7 @@ void *mallocz(size_t size) { void *callocz(size_t nmemb, size_t size) { void *p = calloc(nmemb, size); if (unlikely(!p)) { - error("Cannot allocate %zu bytes of memory.", nmemb * size); + netdata_log_error("Cannot allocate %zu bytes of memory.", nmemb * size); exit(1); } return p; @@ -88,7 +88,7 @@ void *callocz(size_t nmemb, size_t size) { void *reallocz(void *ptr, size_t size) { void *p = realloc(ptr, size); if (unlikely(!p)) { - error("Cannot allocate %zu bytes of memory.", size); + netdata_log_error("Cannot allocate %zu bytes of memory.", size); exit(1); } return p; diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index 15c779ebde..ce7e24c113 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -666,7 +666,7 @@ int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_ } else { if(unlikely(avl_insert(&ids->index, (avl_t *) user_or_group_id) != (void *) user_or_group_id)) { - error("INTERNAL ERROR: duplicate indexing of id during realloc"); + netdata_log_error("INTERNAL ERROR: duplicate indexing of id during realloc"); }; user_or_group_id->next = ids->root; @@ -682,7 +682,7 @@ int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_ while(user_or_group_id) { if(unlikely(!user_or_group_id->updated)) { if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl_t *) user_or_group_id) != user_or_group_id)) - error("INTERNAL ERROR: removal of unused id from index, removed a different id"); + netdata_log_error("INTERNAL ERROR: removal of unused id from index, removed a different id"); if(prev_user_id) prev_user_id->next = user_or_group_id->next; @@ -947,7 +947,7 @@ static int read_apps_groups_conf(const char *path, const char *file) // add this target struct target *n = get_apps_groups_target(s, w, name); if(!n) { - error("Cannot create target '%s' (line %zu, word %zu)", s, line, word); + netdata_log_error("Cannot create target '%s' (line %zu, word %zu)", s, line, word); continue; } @@ -997,7 +997,7 @@ static inline void del_pid_entry(pid_t pid) { struct pid_stat *p = all_pids[pid]; if(unlikely(!p)) { - error("attempted to free pid %d that is not allocated.", pid); + netdata_log_error("attempted to free pid %d that is not allocated.", pid); return; } 
@@ -1035,7 +1035,7 @@ static inline void del_pid_entry(pid_t pid) { static inline int managed_log(struct pid_stat *p, uint32_t log, int status) { if(unlikely(!status)) { - // error("command failed log %u, errno %d", log, errno); + // netdata_log_error("command failed log %u, errno %d", log, errno); if(unlikely(debug_enabled || errno != ENOENT)) { if(unlikely(debug_enabled || !(p->log_thrown & log))) { @@ -1043,33 +1043,33 @@ static inline int managed_log(struct pid_stat *p, uint32_t log, int status) { switch(log) { case PID_LOG_IO: #ifdef __FreeBSD__ - error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm); + netdata_log_error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm); #else - error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + netdata_log_error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); #endif break; case PID_LOG_STATUS: #ifdef __FreeBSD__ - error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm); + netdata_log_error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm); #else - error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + netdata_log_error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); #endif break; case PID_LOG_CMDLINE: #ifdef __FreeBSD__ - error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm); + netdata_log_error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm); #else - error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + netdata_log_error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); #endif break; case PID_LOG_FDS: #ifdef __FreeBSD__ - error("Cannot fetch process %d files (command '%s')", p->pid, p->comm); + netdata_log_error("Cannot fetch process %d files (command '%s')", p->pid, p->comm); #else - error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); + netdata_log_error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); #endif break; @@ -1077,7 +1077,7 @@ static inline int managed_log(struct pid_stat *p, uint32_t log, int status) { break; default: - error("unhandled error for pid %d, command '%s'", p->pid, p->comm); + netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm); break; } } @@ -1085,7 +1085,7 @@ static inline int managed_log(struct pid_stat *p, uint32_t log, int status) { errno = 0; } else if(unlikely(p->log_thrown & log)) { - // error("unsetting log %u on pid %d", log, p->pid); + // netdata_log_error("unsetting log %u on pid %d", log, p->pid); p->log_thrown &= ~log; } @@ -1733,7 +1733,7 @@ cleanup: int file_descriptor_compare(void* a, void* b) { #ifdef NETDATA_INTERNAL_CHECKS if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE) - error("Corrupted index data detected. Please report this."); + netdata_log_error("Corrupted index data detected. 
Please report this."); #endif /* NETDATA_INTERNAL_CHECKS */ if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash) @@ -1777,7 +1777,7 @@ static inline void file_descriptor_not_used(int id) #ifdef NETDATA_INTERNAL_CHECKS if(all_files[id].magic != 0x0BADCAFE) { - error("Ignoring request to remove empty file id %d.", id); + netdata_log_error("Ignoring request to remove empty file id %d.", id); return; } #endif /* NETDATA_INTERNAL_CHECKS */ @@ -1791,7 +1791,7 @@ static inline void file_descriptor_not_used(int id) debug_log(" >> slot %d is empty.", id); if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id])) - error("INTERNAL ERROR: removal of unused fd from index, removed a different fd"); + netdata_log_error("INTERNAL ERROR: removal of unused fd from index, removed a different fd"); #ifdef NETDATA_INTERNAL_CHECKS all_files[id].magic = 0x00000000; @@ -1800,9 +1800,14 @@ static inline void file_descriptor_not_used(int id) } } else - error("Request to decrease counter of fd %d (%s), while the use counter is 0", id, all_files[id].name); + netdata_log_error("Request to decrease counter of fd %d (%s), while the use counter is 0", + id, + all_files[id].name); } - else error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", id, all_files_size); + else + netdata_log_error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)", + id, + all_files_size); } static inline void all_files_grow() { @@ -1824,7 +1829,7 @@ static inline void all_files_grow() { for(i = 0; i < all_files_size; i++) { if(!all_files[i].count) continue; if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i])) - error("INTERNAL ERROR: duplicate indexing of fd during realloc."); + netdata_log_error("INTERNAL ERROR: duplicate indexing of fd during realloc."); } debug_log(" >> re-indexing done."); @@ -1865,7 +1870,7 @@ static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t h #ifdef NETDATA_INTERNAL_CHECKS if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash)) - error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name); + netdata_log_error("fd on position %d is not cleared properly. 
It still has %s in it.", c, all_files[c].name); #endif /* NETDATA_INTERNAL_CHECKS */ debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name); @@ -1896,7 +1901,7 @@ static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t h all_files[c].magic = 0x0BADCAFE; #endif /* NETDATA_INTERNAL_CHECKS */ if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c])) - error("INTERNAL ERROR: duplicate indexing of fd."); + netdata_log_error("INTERNAL ERROR: duplicate indexing of fd."); debug_log("using fd position %d (name: %s)", c, all_files[c].name); @@ -2014,13 +2019,13 @@ static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) { mib[3] = p->pid; if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) { - error("sysctl error: Can't get file descriptors data size for pid %d", p->pid); + netdata_log_error("sysctl error: Can't get file descriptors data size for pid %d", p->pid); return 0; } if (likely(size > 0)) fdsbuf = reallocz(fdsbuf, size); if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) { - error("sysctl error: Can't get file descriptors data for pid %d", p->pid); + netdata_log_error("sysctl error: Can't get file descriptors data for pid %d", p->pid); return 0; } @@ -2193,7 +2198,7 @@ static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) { // cannot read the link if(debug_enabled || (p->target && p->target->debug_enabled)) - error("Cannot read link %s", p->fds[fdid].filename); + netdata_log_error("Cannot read link %s", p->fds[fdid].filename); if(unlikely(p->fds[fdid].fd < 0)) { file_descriptor_not_used(-p->fds[fdid].fd); @@ -2524,7 +2529,7 @@ static inline void link_all_processes_to_their_parents(void) { } else { p->parent = NULL; - error("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid); + netdata_log_error("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid); } } } @@ -2562,7 +2567,7 @@ static int compar_pid(const void *pid1, const void *pid2) { static inline int collect_data_for_pid(pid_t pid, void *ptr) { if(unlikely(pid < 0 || pid > pid_max)) { - error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); + netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); return 0; } @@ -2581,7 +2586,7 @@ static inline int collect_data_for_pid(pid_t pid, void *ptr) { // check its parent pid if(unlikely(p->ppid < 0 || p->ppid > pid_max)) { - error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid); + netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. 
Using 0.", pid, p->comm, p->ppid); p->ppid = 0; } @@ -2633,7 +2638,7 @@ static int collect_data_for_all_processes(void) { int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC }; if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) { - error("sysctl error: Can't get processes data size"); + netdata_log_error("sysctl error: Can't get processes data size"); return 0; } @@ -2653,7 +2658,7 @@ static int collect_data_for_all_processes(void) { // get the processes from the system if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) { - error("sysctl error: Can't get processes data"); + netdata_log_error("sysctl error: Can't get processes data"); return 0; } @@ -2681,7 +2686,7 @@ static int collect_data_for_all_processes(void) { #if (ALL_PIDS_ARE_READ_INSTANTLY == 0) if(unlikely(slc != all_pids_count)) { - error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc); + netdata_log_error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc); all_pids_count = slc; } @@ -3105,7 +3110,7 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, } if(unlikely(!w)) { - error("pid %d %s was left without a target!", p->pid, p->comm); + netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm); return; } @@ -4187,7 +4192,7 @@ static void parse_args(int argc, char **argv) exit(1); } - error("Cannot understand option %s", argv[i]); + netdata_log_error("Cannot understand option %s", argv[i]); exit(1); } @@ -4197,7 +4202,7 @@ static void parse_args(int argc, char **argv) netdata_log_info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'", user_config_dir, stock_config_dir); if(read_apps_groups_conf(stock_config_dir, "groups")) { - error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", stock_config_dir); + netdata_log_error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. 
Failing.", stock_config_dir); exit(1); } else @@ -4223,7 +4228,7 @@ static int am_i_running_as_root() { static int check_capabilities() { cap_t caps = cap_get_proc(); if(!caps) { - error("Cannot get current capabilities."); + netdata_log_error("Cannot get current capabilities."); return 0; } else if(debug_enabled) @@ -4233,12 +4238,12 @@ static int check_capabilities() { cap_flag_value_t cfv = CAP_CLEAR; if(cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) { - error("Cannot find if CAP_DAC_READ_SEARCH is effective."); + netdata_log_error("Cannot find if CAP_DAC_READ_SEARCH is effective."); ret = 0; } else { if(cfv != CAP_SET) { - error("apps.plugin should run with CAP_DAC_READ_SEARCH."); + netdata_log_error("apps.plugin should run with CAP_DAC_READ_SEARCH."); ret = 0; } else if(debug_enabled) @@ -4247,12 +4252,12 @@ static int check_capabilities() { cfv = CAP_CLEAR; if(cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &cfv) == -1) { - error("Cannot find if CAP_SYS_PTRACE is effective."); + netdata_log_error("Cannot find if CAP_SYS_PTRACE is effective."); ret = 0; } else { if(cfv != CAP_SET) { - error("apps.plugin should run with CAP_SYS_PTRACE."); + netdata_log_error("apps.plugin should run with CAP_SYS_PTRACE."); ret = 0; } else if(debug_enabled) @@ -5240,7 +5245,7 @@ void *reader_main(void *arg __maybe_unused) { char *function = get_word(words, num_words, 3); if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { - error("Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", + netdata_log_error("Received incomplete %s (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", keyword, transaction?transaction:"(unset)", timeout_s?timeout_s:"(unset)", @@ -5266,12 +5271,12 @@ void *reader_main(void *arg __maybe_unused) { } } else - error("Received unknown command: %s", keyword?keyword:"(unset)"); + netdata_log_error("Received unknown command: %s", keyword?keyword:"(unset)"); } if(!s || feof(stdin) || ferror(stdin)) { apps_plugin_exit = true; - error("Received error on stdin."); + netdata_log_error("Received error on stdin."); } exit(1); @@ -5351,14 +5356,14 @@ int main(int argc, char **argv) { if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) { uid_t uid = getuid(), euid = geteuid(); #ifdef HAVE_CAPABILITY - error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " + netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " "Without these, apps.plugin cannot report disk I/O utilization of other processes. " "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; " "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " , uid, euid, argv[0], argv[0], argv[0] ); #else - error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " + netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " "Without these, apps.plugin cannot report disk I/O utilization of other processes. " "Your system does not support capabilities. " "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; " @@ -5419,7 +5424,7 @@ int main(int argc, char **argv) { get_MemTotal(); if(!collect_data_for_all_processes()) { - error("Cannot collect /proc data for running processes. 
Disabling apps.plugin..."); + netdata_log_error("Cannot collect /proc data for running processes. Disabling apps.plugin..."); printf("DISABLE\n"); netdata_mutex_unlock(&mutex); netdata_thread_cancel(reader_thread); diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c index 1c564a25d4..ce7f05d4d2 100644 --- a/collectors/cups.plugin/cups_plugin.c +++ b/collectors/cups.plugin/cups_plugin.c @@ -104,7 +104,7 @@ void parse_command_line(int argc, char **argv) { if (freq >= netdata_update_every) { netdata_update_every = freq; } else if (freq) { - error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every); + netdata_log_error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every); } } @@ -275,7 +275,7 @@ int main(int argc, char **argv) { httpClose(http); http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL); if(http == NULL) { - error("cups daemon is not running. Exiting!"); + netdata_log_error("cups daemon is not running. Exiting!"); exit(1); } } @@ -321,7 +321,7 @@ int main(int argc, char **argv) { fprintf(stderr, "printer state is missing for destination %s", curr_dest->name); break; default: - error("Unknown printer state (%d) found.", printer_state); + netdata_log_error("Unknown printer state (%d) found.", printer_state); break; } @@ -364,7 +364,7 @@ int main(int argc, char **argv) { global_job_metrics.size_processing += curr_job->size; break; default: - error("Unsupported job state (%u) found.", curr_job->state); + netdata_log_error("Unsupported job state (%u) found.", curr_job->state); break; } } diff --git a/collectors/debugfs.plugin/debugfs_plugin.c b/collectors/debugfs.plugin/debugfs_plugin.c index d8d316e3c1..1c5bf106e9 100644 --- a/collectors/debugfs.plugin/debugfs_plugin.c +++ b/collectors/debugfs.plugin/debugfs_plugin.c @@ -30,18 +30,18 @@ static int debugfs_check_capabilities() { cap_t caps = cap_get_proc(); if (!caps) { - error("Cannot get current capabilities."); + netdata_log_error("Cannot get current capabilities."); return 0; } int ret = 1; cap_flag_value_t cfv = CAP_CLEAR; if (cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) { - error("Cannot find if CAP_DAC_READ_SEARCH is effective."); + netdata_log_error("Cannot find if CAP_DAC_READ_SEARCH is effective."); ret = 0; } else { if (cfv != CAP_SET) { - error("debugfs.plugin should run with CAP_DAC_READ_SEARCH."); + netdata_log_error("debugfs.plugin should run with CAP_DAC_READ_SEARCH."); ret = 0; } } @@ -186,7 +186,7 @@ int main(int argc, char **argv) if (!debugfs_check_capabilities() && !debugfs_am_i_running_as_root() && !debugfs_check_sys_permission()) { uid_t uid = getuid(), euid = geteuid(); #ifdef HAVE_CAPABILITY - error( + netdata_log_error( "debugfs.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " "Without these, debugfs.plugin cannot access /sys/kernel/debug. " "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; " @@ -197,7 +197,7 @@ int main(int argc, char **argv) argv[0], argv[0]); #else - error( + netdata_log_error( "debugfs.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. " "Without these, debugfs.plugin cannot access /sys/kernel/debug." "Your system does not support capabilities. 
" diff --git a/collectors/debugfs.plugin/debugfs_zswap.c b/collectors/debugfs.plugin/debugfs_zswap.c index 9d66f96f3a..c8fc0f030a 100644 --- a/collectors/debugfs.plugin/debugfs_zswap.c +++ b/collectors/debugfs.plugin/debugfs_zswap.c @@ -251,7 +251,7 @@ int zswap_collect_data(struct netdata_zswap_metric *metric) snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, metric->filename); if (read_single_number_file(filename, (unsigned long long *)&metric->value)) { - error("Cannot read file %s", filename); + netdata_log_error("Cannot read file %s", filename); return 1; } diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c index a9afad803e..238286d92f 100644 --- a/collectors/ebpf.plugin/ebpf.c +++ b/collectors/ebpf.plugin/ebpf.c @@ -557,7 +557,7 @@ ARAL *ebpf_allocate_pid_aral(char *name, size_t size) { static size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID; if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) { - error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS); + netdata_log_error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS); max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS; } @@ -593,7 +593,7 @@ static inline void ebpf_check_before2go() } if (i) { - error("eBPF cannot unload all threads on time, but it will go away"); + netdata_log_error("eBPF cannot unload all threads on time, but it will go away"); } } @@ -614,10 +614,10 @@ static void ebpf_exit() char filename[FILENAME_MAX + 1]; ebpf_pid_file(filename, FILENAME_MAX); if (unlink(filename)) - error("Cannot remove PID file %s", filename); + netdata_log_error("Cannot remove PID file %s", filename); #ifdef NETDATA_INTERNAL_CHECKS - error("Good bye world! I was PID %d", main_thread_id); + netdata_log_error("Good bye world! 
I was PID %d", main_thread_id); #endif fprintf(stdout, "EXIT\n"); fflush(stdout); @@ -670,7 +670,7 @@ static void ebpf_unload_unique_maps() if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) { if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) - error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name); + netdata_log_error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name); continue; } @@ -1605,7 +1605,7 @@ static void read_local_addresses() { struct ifaddrs *ifaddr, *ifa; if (getifaddrs(&ifaddr) == -1) { - error("Cannot get the local IP addresses, it is no possible to do separation between inbound and outbound connections"); + netdata_log_error("Cannot get the local IP addresses, it is no possible to do separation between inbound and outbound connections"); return; } @@ -1714,7 +1714,7 @@ static inline void how_to_load(char *ptr) else if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_DEFAULT)) ebpf_set_thread_mode(MODE_ENTRY); else - error("the option %s for \"ebpf load mode\" is not a valid option.", ptr); + netdata_log_error("the option %s for \"ebpf load mode\" is not a valid option.", ptr); } /** @@ -2051,7 +2051,7 @@ void set_global_variables() ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); if (ebpf_nprocs < 0) { ebpf_nprocs = NETDATA_MAX_PROCESSOR; - error("Cannot identify number of process, using default value %d", ebpf_nprocs); + netdata_log_error("Cannot identify number of process, using default value %d", ebpf_nprocs); } isrh = get_redhat_release(); @@ -2080,12 +2080,12 @@ static inline void ebpf_load_thread_config() int ebpf_check_conditions() { if (!has_condition_to_run(running_on_kernel)) { - error("The current collector cannot run on this kernel."); + netdata_log_error("The current collector cannot run on this kernel."); return -1; } if (!am_i_running_as_root()) { - error( + netdata_log_error( "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities..", (unsigned int)getuid(), (unsigned int)geteuid()); return -1; @@ -2105,7 +2105,7 @@ int ebpf_adjust_memory_limit() { struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY }; if (setrlimit(RLIMIT_MEMLOCK, &r)) { - error("Setrlimit(RLIMIT_MEMLOCK)"); + netdata_log_error("Setrlimit(RLIMIT_MEMLOCK)"); return -1; } @@ -2398,7 +2398,7 @@ unittest: ebpf_user_config_dir, ebpf_stock_config_dir); if (ebpf_read_apps_groups_conf( &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) { - error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", + netdata_log_error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. 
Failing.", ebpf_stock_config_dir); ebpf_exit(); } @@ -2445,7 +2445,7 @@ static char *ebpf_get_process_name(pid_t pid) procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); if(unlikely(!ff)) { - error("Cannot open %s", filename); + netdata_log_error("Cannot open %s", filename); return name; } diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c index d3e778c866..a7091bc1c3 100644 --- a/collectors/ebpf.plugin/ebpf_apps.c +++ b/collectors/ebpf.plugin/ebpf_apps.c @@ -35,7 +35,7 @@ void ebpf_aral_init(void) { size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID; if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) { - error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS); + netdata_log_error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS); max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS; } @@ -652,7 +652,7 @@ int ebpf_read_apps_groups_conf(struct ebpf_target **agdt, struct ebpf_target **a // add this target struct ebpf_target *n = get_apps_groups_target(agrt, s, w, name); if (!n) { - error("Cannot create target '%s' (line %zu, word %zu)", s, line, word); + netdata_log_error("Cannot create target '%s' (line %zu, word %zu)", s, line, word); continue; } @@ -755,32 +755,32 @@ static inline void debug_log_dummy(void) static inline int managed_log(struct ebpf_pid_stat *p, uint32_t log, int status) { if (unlikely(!status)) { - // error("command failed log %u, errno %d", log, errno); + // netdata_log_error("command failed log %u, errno %d", log, errno); if (unlikely(debug_enabled || errno != ENOENT)) { if (unlikely(debug_enabled || !(p->log_thrown & log))) { p->log_thrown |= log; switch (log) { case PID_LOG_IO: - error( + netdata_log_error( "Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); break; case PID_LOG_STATUS: - error( + netdata_log_error( "Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); break; case PID_LOG_CMDLINE: - error( + netdata_log_error( "Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); break; case PID_LOG_FDS: - error( + netdata_log_error( "Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm); break; @@ -789,14 +789,14 @@ static inline int managed_log(struct ebpf_pid_stat *p, uint32_t log, int status) break; default: - error("unhandled error for pid %d, command '%s'", p->pid, p->comm); + netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm); break; } } } errno = 0; } else if (unlikely(p->log_thrown & log)) { - // error("unsetting log %u on pid %d", log, p->pid); + // netdata_log_error("unsetting log %u on pid %d", log, p->pid); p->log_thrown &= ~log; } @@ -1005,7 +1005,7 @@ static inline int read_proc_pid_stat(struct ebpf_pid_stat *p, void *ptr) static inline int collect_data_for_pid(pid_t pid, void *ptr) { if (unlikely(pid < 0 || pid > pid_max)) { - error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); + netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max); return 0; } @@ -1020,7 +1020,7 @@ static inline int collect_data_for_pid(pid_t pid, void *ptr) // check its parent pid if (unlikely(p->ppid < 0 || p->ppid > pid_max)) { - error("Pid %d (command '%s') states invalid parent pid %d. 
Using 0.", pid, p->comm, p->ppid); + netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid); p->ppid = 0; } @@ -1220,7 +1220,7 @@ static inline void del_pid_entry(pid_t pid) struct ebpf_pid_stat *p = ebpf_all_pids[pid]; if (unlikely(!p)) { - error("attempted to free pid %d that is not allocated.", pid); + netdata_log_error("attempted to free pid %d that is not allocated.", pid); return; } @@ -1403,7 +1403,7 @@ static inline void aggregate_pid_on_target(struct ebpf_target *w, struct ebpf_pi } if (unlikely(!w)) { - error("pid %d %s was left without a target!", p->pid, p->comm); + netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm); return; } diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index c287136cfd..8008c4c55a 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -1220,7 +1220,7 @@ static int ebpf_cachestat_set_internal_value() } if (!address.addr) { - error("%s cachestat.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND); + netdata_log_error("%s cachestat.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND); return -1; } @@ -1261,7 +1261,7 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em) #endif if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c index 92b56709fb..fd4e783db1 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.c +++ b/collectors/ebpf.plugin/ebpf_cgroup.c @@ -28,7 +28,7 @@ static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length) value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (!value) { - error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen"); + netdata_log_error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen"); close(shm_fd_ebpf_cgroup); shm_fd_ebpf_cgroup = -1; shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); @@ -71,7 +71,7 @@ void ebpf_map_cgroup_shared_memory() shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660); if (shm_fd_ebpf_cgroup < 0) { if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES) - error("Shared memory was not initialized, integration between processes won't happen."); + netdata_log_error("Shared memory was not initialized, integration between processes won't happen."); return; } @@ -103,7 +103,7 @@ void ebpf_map_cgroup_shared_memory() shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1); if (shm_sem_ebpf_cgroup == SEM_FAILED) { - error("Cannot create semaphore, integration between eBPF and cgroup won't happen"); + netdata_log_error("Cannot create semaphore, integration between eBPF and cgroup won't happen"); limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1; munmap(ebpf_mapped_memory, length); shm_ebpf_cgroup.header = NULL; diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index d3c4a530b6..cdcb55b2cd 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -1112,7 +1112,7 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em) #endif if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_disk.c 
b/collectors/ebpf.plugin/ebpf_disk.c index 026e44ec89..9f32628a22 100644 --- a/collectors/ebpf.plugin/ebpf_disk.c +++ b/collectors/ebpf.plugin/ebpf_disk.c @@ -341,7 +341,7 @@ static void update_disk_table(char *name, int major, int minor, time_t current_t netdata_ebpf_disks_t *check; check = (netdata_ebpf_disks_t *) avl_insert_lock(&disk_tree, (avl_t *)w); if (check != w) - error("Internal error, cannot insert the AVL tree."); + netdata_log_error("Internal error, cannot insert the AVL tree."); #ifdef NETDATA_INTERNAL_CHECKS netdata_log_info("The Latency is monitoring the hard disk %s (Major = %d, Minor = %d, Device = %u)", name, major, minor,w->dev); @@ -424,12 +424,12 @@ static void ebpf_disk_disable_tracepoints() char *default_message = { "Cannot disable the tracepoint" }; if (!was_block_issue_enabled) { if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_issue)) - error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_issue); + netdata_log_error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_issue); } if (!was_block_rq_complete_enabled) { if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete)) - error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_rq_complete); + netdata_log_error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_rq_complete); } } @@ -814,7 +814,7 @@ static int ebpf_disk_load_bpf(ebpf_module_t *em) #endif if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } @@ -845,7 +845,7 @@ void *ebpf_disk_thread(void *ptr) } if (pthread_mutex_init(&plot_mutex, NULL)) { - error("Cannot initialize local mutex"); + netdata_log_error("Cannot initialize local mutex"); goto enddisk; } diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c index d39e6ae20d..cd7b1086a6 100644 --- a/collectors/ebpf.plugin/ebpf_fd.c +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -326,7 +326,7 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em) netdata_ebpf_program_loaded_t test = mt[NETDATA_FD_SYSCALL_OPEN].mode; if (ebpf_fd_set_target_values()) { - error("%s file descriptor.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND); + netdata_log_error("%s file descriptor.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND); return -1; } @@ -1125,7 +1125,7 @@ static int ebpf_fd_load_bpf(ebpf_module_t *em) #endif if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c index 6dd19e313d..9a98963621 100644 --- a/collectors/ebpf.plugin/ebpf_hardirq.c +++ b/collectors/ebpf.plugin/ebpf_hardirq.c @@ -406,7 +406,7 @@ static int hardirq_read_latency_map(int mapfd) avl_t *check = avl_insert_lock(&hardirq_pub, (avl_t *)v); if (check != (avl_t *)v) { - error("Internal error, cannot insert the AVL tree."); + netdata_log_error("Internal error, cannot insert the AVL tree."); } } diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c index 7179e5e7ce..e87bff2c28 100644 --- a/collectors/ebpf.plugin/ebpf_mdflush.c +++ b/collectors/ebpf.plugin/ebpf_mdflush.c @@ -241,7 +241,7 @@ static void mdflush_read_count_map(int maps_per_core) if (v_is_new) { avl_t *check = avl_insert_lock(&mdflush_pub, (avl_t *)v); if (check != (avl_t *)v) { - error("Internal error, 
cannot insert the AVL tree."); + netdata_log_error("Internal error, cannot insert the AVL tree."); } } } @@ -384,7 +384,7 @@ void *ebpf_mdflush_thread(void *ptr) char *md_flush_request = ebpf_find_symbol("md_flush_request"); if (!md_flush_request) { - error("Cannot monitor MD devices, because md is not loaded."); + netdata_log_error("Cannot monitor MD devices, because md is not loaded."); goto endmdflush; } @@ -393,7 +393,7 @@ void *ebpf_mdflush_thread(void *ptr) ebpf_adjust_thread_load(em, default_btf); #endif if (ebpf_mdflush_load_bpf(em)) { - error("Cannot load eBPF software."); + netdata_log_error("Cannot load eBPF software."); goto endmdflush; } diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c index e48c892276..c511523993 100644 --- a/collectors/ebpf.plugin/ebpf_mount.c +++ b/collectors/ebpf.plugin/ebpf_mount.c @@ -408,7 +408,7 @@ static int ebpf_mount_load_bpf(ebpf_module_t *em) #endif if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c index 6b6f62aa35..56f637ae95 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -259,7 +259,7 @@ static uint32_t oomkill_read_data(int32_t *keys) if (unlikely(test < 0)) { // since there's only 1 thread doing these deletions, it should be // impossible to get this condition. - error("key unexpectedly not available for deletion."); + netdata_log_error("key unexpectedly not available for deletion."); } } diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index 17a9809d3c..c38a733dac 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -683,17 +683,17 @@ static void ebpf_process_disable_tracepoints() char *default_message = { "Cannot disable the tracepoint" }; if (!was_sched_process_exit_enabled) { if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit)) - error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit); + netdata_log_error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit); } if (!was_sched_process_exec_enabled) { if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec)) - error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec); + netdata_log_error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec); } if (!was_sched_process_fork_enabled) { if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork)) - error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork); + netdata_log_error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork); } } diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c index 94ac624b30..7e904e4b6c 100644 --- a/collectors/ebpf.plugin/ebpf_shm.c +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -1037,7 +1037,7 @@ static int ebpf_shm_load_bpf(ebpf_module_t *em) if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index 8cb5bf0dde..3e9c3036c2 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c 
+++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -1927,7 +1927,7 @@ static void store_socket_inside_avl(netdata_vector_plot_t *out, netdata_socket_t netdata_socket_plot_t *check ; check = (netdata_socket_plot_t *) avl_insert_lock(&out->tree, (avl_t *)w); if (check != w) - error("Internal error, cannot insert the AVL tree."); + netdata_log_error("Internal error, cannot insert the AVL tree."); #ifdef NETDATA_INTERNAL_CHECKS char iptext[INET6_ADDRSTRLEN]; @@ -3165,7 +3165,7 @@ static inline in_addr_t ipv4_network(in_addr_t addr, int prefix) static inline int ip2nl(uint8_t *dst, char *ip, int domain, char *source) { if (inet_pton(domain, ip, dst) <= 0) { - error("The address specified (%s) is invalid ", source); + netdata_log_error("The address specified (%s) is invalid ", source); return -1; } @@ -3639,7 +3639,7 @@ static void read_max_dimension(struct config *cfg) EBPF_MAXIMUM_DIMENSIONS, NETDATA_NV_CAP_VALUE); if (maxdim < 0) { - error("'maximum dimensions = %d' must be a positive number, Netdata will change for default value %ld.", + netdata_log_error("'maximum dimensions = %d' must be a positive number, Netdata will change for default value %ld.", maxdim, NETDATA_NV_CAP_VALUE); maxdim = NETDATA_NV_CAP_VALUE; } @@ -3827,7 +3827,7 @@ static void link_dimension_name(char *port, uint32_t hash, char *value) { int test = str2i(port); if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){ - error("The dimension given (%s = %s) has an invalid value and it will be ignored.", port, value); + netdata_log_error("The dimension given (%s = %s) has an invalid value and it will be ignored.", port, value); return; } @@ -3950,7 +3950,7 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em) #endif if (ret) { - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); } return ret; @@ -3975,7 +3975,7 @@ void *ebpf_socket_thread(void *ptr) parse_table_size_options(&socket_config); if (pthread_mutex_init(&nv_mutex, NULL)) { - error("Cannot initialize local mutex"); + netdata_log_error("Cannot initialize local mutex"); goto endsocket; } diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c index 492b596782..e92a63d345 100644 --- a/collectors/ebpf.plugin/ebpf_swap.c +++ b/collectors/ebpf.plugin/ebpf_swap.c @@ -818,7 +818,7 @@ static int ebpf_swap_load_bpf(ebpf_module_t *em) #endif if (ret) - error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); return ret; } diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c index 612f929ba6..eb6c658394 100644 --- a/collectors/plugins.d/plugins_d.c +++ b/collectors/plugins.d/plugins_d.c @@ -94,7 +94,7 @@ static void pluginsd_worker_thread_handle_success(struct plugind *cd) { } if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { - error("PLUGINSD: 'host:'%s', '%s' (pid %d) does not generate useful output, " + netdata_log_error("PLUGINSD: 'host:'%s', '%s' (pid %d) does not generate useful output, " "although it reports success (exits with 0)." "We have tried to collect something %zu times - unsuccessfully. Disabling it.", rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, cd->serial_failures); @@ -112,14 +112,14 @@ static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_r } if (!cd->successful_collections) { - error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d and haven't collected any data. 
Disabling it.", + netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d and haven't collected any data. Disabling it.", rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code); plugin_set_disabled(cd); return; } if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) { - error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", + netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s", rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, cd->successful_collections, plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled."); sleep((unsigned int)(cd->update_every * 10)); @@ -127,7 +127,7 @@ static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_r } if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) { - error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)." + netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)." "We tried to restart it %zu times, but it failed to generate data. Disabling it.", rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, cd->successful_collections, cd->serial_failures); @@ -153,7 +153,7 @@ static void *pluginsd_worker_thread(void *arg) { FILE *fp_child_output = netdata_popen(cd->cmd, &cd->unsafe.pid, &fp_child_input); if (unlikely(!fp_child_input || !fp_child_output)) { - error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").", rrdhost_hostname(cd->host), cd->cmd); + netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").", rrdhost_hostname(cd->host), cd->cmd); break; } @@ -235,7 +235,7 @@ void *pluginsd_main(void *ptr) if (unlikely(!dir)) { if (directory_errors[idx] != errno) { directory_errors[idx] = errno; - error("cannot open plugins directory '%s'", directory_name); + netdata_log_error("cannot open plugins directory '%s'", directory_name); } continue; } diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c index 08331bcbde..b79fe02db0 100644 --- a/collectors/plugins.d/pluginsd_parser.c +++ b/collectors/plugins.d/pluginsd_parser.c @@ -22,7 +22,7 @@ static ssize_t send_to_plugin(const char *txt, void *data) { bytes = netdata_ssl_write(ssl, (void *) txt, strlen(txt)); else - error("PLUGINSD: cannot send command (SSL)"); + netdata_log_error("PLUGINSD: cannot send command (SSL)"); spinlock_unlock(&parser->writer.spinlock); return bytes; @@ -33,7 +33,7 @@ static ssize_t send_to_plugin(const char *txt, void *data) { bytes = fprintf(parser->fp_output, "%s", txt); if(bytes <= 0) { - error("PLUGINSD: cannot send command (FILE)"); + netdata_log_error("PLUGINSD: cannot send command (FILE)"); bytes = -2; } else @@ -51,7 +51,7 @@ static ssize_t send_to_plugin(const char *txt, void *data) { do { sent = write(parser->fd, &txt[bytes], total - bytes); if(sent <= 0) { - error("PLUGINSD: cannot send command (fd)"); + netdata_log_error("PLUGINSD: cannot send command (fd)"); spinlock_unlock(&parser->writer.spinlock); return -3; } @@ -64,7 +64,7 @@ static ssize_t send_to_plugin(const char *txt, void *data) { } spinlock_unlock(&parser->writer.spinlock); - error("PLUGINSD: cannot send command (no output socket/pipe/file given to plugins.d parser)"); + 
netdata_log_error("PLUGINSD: cannot send command (no output socket/pipe/file given to plugins.d parser)"); return -4; } @@ -72,7 +72,7 @@ static inline RRDHOST *pluginsd_require_host_from_parent(PARSER *parser, const c RRDHOST *host = parser->user.host; if(unlikely(!host)) - error("PLUGINSD: command %s requires a host, but is not set.", cmd); + netdata_log_error("PLUGINSD: command %s requires a host, but is not set.", cmd); return host; } @@ -81,7 +81,7 @@ static inline RRDSET *pluginsd_require_chart_from_parent(PARSER *parser, const c RRDSET *st = parser->user.st; if(unlikely(!st)) - error("PLUGINSD: command %s requires a chart defined via command %s, but is not set.", cmd, parent_cmd); + netdata_log_error("PLUGINSD: command %s requires a chart defined via command %s, but is not set.", cmd, parent_cmd); return st; } @@ -124,8 +124,10 @@ void pluginsd_rrdset_cleanup(RRDSET *st) { static inline void pluginsd_unlock_previous_chart(PARSER *parser, const char *keyword, bool stale) { if(unlikely(pluginsd_unlock_rrdset_data_collection(parser))) { if(stale) - error("PLUGINSD: 'host:%s/chart:%s/' stale data collection lock found during %s; it has been unlocked", - rrdhost_hostname(parser->user.st->rrdhost), rrdset_id(parser->user.st), keyword); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s/' stale data collection lock found during %s; it has been unlocked", + rrdhost_hostname(parser->user.st->rrdhost), + rrdset_id(parser->user.st), + keyword); } if(unlikely(parser->user.v2.ml_locked)) { @@ -133,8 +135,10 @@ static inline void pluginsd_unlock_previous_chart(PARSER *parser, const char *ke parser->user.v2.ml_locked = false; if(stale) - error("PLUGINSD: 'host:%s/chart:%s/' stale ML lock found during %s, it has been unlocked", - rrdhost_hostname(parser->user.st->rrdhost), rrdset_id(parser->user.st), keyword); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s/' stale ML lock found during %s, it has been unlocked", + rrdhost_hostname(parser->user.st->rrdhost), + rrdset_id(parser->user.st), + keyword); } } @@ -159,8 +163,8 @@ static inline void pluginsd_set_chart_from_parent(PARSER *parser, RRDSET *st, co static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, const char *cmd) { if (unlikely(!dimension || !*dimension)) { - error("PLUGINSD: 'host:%s/chart:%s' got a %s, without a dimension.", - rrdhost_hostname(host), rrdset_id(st), cmd); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, without a dimension.", + rrdhost_hostname(host), rrdset_id(st), cmd); return NULL; } @@ -181,8 +185,8 @@ static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, cons rda = rrddim_find_and_acquire(st, dimension); if (unlikely(!rda)) { - error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.", - rrdhost_hostname(host), rrdset_id(st), dimension, cmd); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.", + rrdhost_hostname(host), rrdset_id(st), dimension, cmd); return NULL; } @@ -195,15 +199,15 @@ static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, cons static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, const char *cmd) { if (unlikely(!chart || !*chart)) { - error("PLUGINSD: 'host:%s' got a %s without a chart id.", - rrdhost_hostname(host), cmd); + netdata_log_error("PLUGINSD: 'host:%s' got a %s without a chart id.", + rrdhost_hostname(host), cmd); return NULL; } RRDSET *st = rrdset_find(host, chart); if (unlikely(!st)) - 
error("PLUGINSD: 'host:%s/chart:%s' got a %s but chart does not exist.", - rrdhost_hostname(host), chart, cmd); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s but chart does not exist.", + rrdhost_hostname(host), chart, cmd); return st; } @@ -717,7 +721,7 @@ static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void pf->sent_ut = now_realtime_usec(); if(ret < 0) { - error("FUNCTION: failed to send function to plugin, error %d", ret); + netdata_log_error("FUNCTION: failed to send function to plugin, error %d", ret); rrd_call_function_error(pf->destination_wb, "Failed to communicate with collector", HTTP_RESP_BACKEND_FETCH_FAILED); } else { @@ -731,7 +735,7 @@ static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, void *new_func, void *parser_ptr __maybe_unused) { struct inflight_function *pf = new_func; - error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. Ignoring the second one.", string2str(pf->function)); + netdata_log_error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. Ignoring the second one.", string2str(pf->function)); pf->code = rrd_call_function_error(pf->destination_wb, "This request is already in progress", HTTP_RESP_BAD_REQUEST); pf->callback(pf->destination_wb, pf->code, pf->callback_data); string_freez(pf->function); @@ -841,14 +845,14 @@ static inline PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER if(!st) global = true; if (unlikely(!timeout_s || !name || !help || (!global && !st))) { - error("PLUGINSD: 'host:%s/chart:%s' got a FUNCTION, without providing the required data (global = '%s', name = '%s', timeout = '%s', help = '%s'). Ignoring it.", - rrdhost_hostname(host), - st?rrdset_id(st):"(unset)", - global?"yes":"no", - name?name:"(unset)", - timeout_s?timeout_s:"(unset)", - help?help:"(unset)" - ); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a FUNCTION, without providing the required data (global = '%s', name = '%s', timeout = '%s', help = '%s'). Ignoring it.", + rrdhost_hostname(host), + st?rrdset_id(st):"(unset)", + global?"yes":"no", + name?name:"(unset)", + timeout_s?timeout_s:"(unset)", + help?help:"(unset)" + ); return PARSER_RC_ERROR; } @@ -878,7 +882,7 @@ static inline PARSER_RC pluginsd_function_result_begin(char **words, size_t num_ char *expires = get_word(words, num_words, 4); if (unlikely(!key || !*key || !status || !*status || !format || !*format || !expires || !*expires)) { - error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " without providing the required data (key = '%s', status = '%s', format = '%s', expires = '%s')." + netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " without providing the required data (key = '%s', status = '%s', format = '%s', expires = '%s')." , key ? key : "(unset)" , status ? status : "(unset)" , format ? 
format : "(unset)" @@ -898,7 +902,7 @@ static inline PARSER_RC pluginsd_function_result_begin(char **words, size_t num_ pf = (struct inflight_function *)dictionary_get(parser->inflight.functions, key); if(!pf) { - error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " for transaction '%s', but the transaction is not found.", key?key:"(unset)"); + netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " for transaction '%s', but the transaction is not found.", key?key:"(unset)"); } else { if(format && *format) @@ -955,11 +959,11 @@ static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER value = NULL; if (unlikely(!value)) { - error("PLUGINSD: 'host:%s/chart:%s' cannot set %s VARIABLE '%s' to an empty value", - rrdhost_hostname(host), - st ? rrdset_id(st):"UNSET", - (global) ? "HOST" : "CHART", - name); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot set %s VARIABLE '%s' to an empty value", + rrdhost_hostname(host), + st ? rrdset_id(st):"UNSET", + (global) ? "HOST" : "CHART", + name); return PARSER_RC_OK; } @@ -970,18 +974,18 @@ static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER v = (NETDATA_DOUBLE) str2ndd_encoded(value, &endptr); if (unlikely(endptr && *endptr)) { if (endptr == value) - error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' cannot be parsed as a number", - rrdhost_hostname(host), - st ? rrdset_id(st):"UNSET", - value, - name); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' cannot be parsed as a number", + rrdhost_hostname(host), + st ? rrdset_id(st):"UNSET", + value, + name); else - error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' has leftovers: '%s'", - rrdhost_hostname(host), - st ? rrdset_id(st):"UNSET", - value, - name, - endptr); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' has leftovers: '%s'", + rrdhost_hostname(host), + st ? 
rrdset_id(st):"UNSET", + value, + name, + endptr); } if (global) { @@ -991,9 +995,9 @@ static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER rrdvar_custom_host_variable_release(host, rva); } else - error("PLUGINSD: 'host:%s' cannot find/create HOST VARIABLE '%s'", - rrdhost_hostname(host), - name); + netdata_log_error("PLUGINSD: 'host:%s' cannot find/create HOST VARIABLE '%s'", + rrdhost_hostname(host), + name); } else { const RRDSETVAR_ACQUIRED *rsa = rrdsetvar_custom_chart_variable_add_and_acquire(st, name); if (rsa) { @@ -1001,8 +1005,8 @@ static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER rrdsetvar_custom_chart_variable_release(st, rsa); } else - error("PLUGINSD: 'host:%s/chart:%s' cannot find/create CHART VARIABLE '%s'", - rrdhost_hostname(host), rrdset_id(st), name); + netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot find/create CHART VARIABLE '%s'", + rrdhost_hostname(host), rrdset_id(st), name); } return PARSER_RC_OK; @@ -1093,7 +1097,7 @@ static inline PARSER_RC pluginsd_clabel(char **words, size_t num_words, PARSER * const char *label_source = get_word(words, num_words, 3); if (!name || !value || !*label_source) { - error("Ignoring malformed or empty CHART LABEL command."); + netdata_log_error("Ignoring malformed or empty CHART LABEL command."); return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); } @@ -1118,7 +1122,7 @@ static inline PARSER_RC pluginsd_clabel_commit(char **words __maybe_unused, size debug(D_PLUGINSD, "requested to commit chart labels"); if(!parser->user.chart_rrdlabels_linked_temporarily) { - error("PLUGINSD: 'host:%s' got CLABEL_COMMIT, without a CHART or BEGIN. Ignoring it.", rrdhost_hostname(host)); + netdata_log_error("PLUGINSD: 'host:%s' got CLABEL_COMMIT, without a CHART or BEGIN. Ignoring it.", rrdhost_hostname(host)); return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); } @@ -1209,11 +1213,12 @@ static inline PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PA return PARSER_RC_OK; } - error("PLUGINSD REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN - " from %ld to %ld, but timestamps are invalid " - "(now is %ld [%s], tolerance %ld). Ignoring " PLUGINSD_KEYWORD_REPLAY_SET, - rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, - wall_clock_time, wall_clock_comes_from_child ? "child wall clock" : "parent wall clock", tolerance); + netdata_log_error("PLUGINSD REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN + " from %ld to %ld, but timestamps are invalid " + "(now is %ld [%s], tolerance %ld). Ignoring " PLUGINSD_KEYWORD_REPLAY_SET, + rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, + wall_clock_time, wall_clock_comes_from_child ? "child wall clock" : "parent wall clock", + tolerance); } // the child sends an RBEGIN without any parameters initially @@ -1279,7 +1284,7 @@ static inline PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARS if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL); if (unlikely(!parser->user.replay.start_time || !parser->user.replay.end_time)) { - error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s with invalid timestamps %ld to %ld from a %s. Disabling it.", + netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s with invalid timestamps %ld to %ld from a %s. 
Disabling it.", rrdhost_hostname(host), rrdset_id(st), dimension, @@ -1391,7 +1396,7 @@ static inline PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, si static inline PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser) { if (num_words < 7) { // accepts 7, but the 7th is optional - error("REPLAY: malformed " PLUGINSD_KEYWORD_REPLAY_END " command"); + netdata_log_error("REPLAY: malformed " PLUGINSD_KEYWORD_REPLAY_END " command"); return PARSER_RC_ERROR; } @@ -1808,7 +1813,7 @@ static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PAR const char *claim_id_str = get_word(words, num_words, 2); if (!host_uuid_str || !claim_id_str) { - error("Command CLAIMED_ID came malformed, uuid = '%s', claim_id = '%s'", + netdata_log_error("Command CLAIMED_ID came malformed, uuid = '%s', claim_id = '%s'", host_uuid_str ? host_uuid_str : "[unset]", claim_id_str ? claim_id_str : "[unset]"); return PARSER_RC_ERROR; @@ -1820,16 +1825,16 @@ static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PAR // We don't need the parsed UUID // just do it to check the format if(uuid_parse(host_uuid_str, uuid)) { - error("1st parameter (host GUID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", host_uuid_str); + netdata_log_error("1st parameter (host GUID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", host_uuid_str); return PARSER_RC_ERROR; } if(uuid_parse(claim_id_str, uuid) && strcmp(claim_id_str, "NULL") != 0) { - error("2nd parameter (Claim ID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", claim_id_str); + netdata_log_error("2nd parameter (Claim ID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", claim_id_str); return PARSER_RC_ERROR; } if(strcmp(host_uuid_str, host->machine_guid) != 0) { - error("Claim ID is for host \"%s\" but it came over connection for \"%s\"", host_uuid_str, host->machine_guid); + netdata_log_error("Claim ID is for host \"%s\" but it came over connection for \"%s\"", host_uuid_str, host->machine_guid); return PARSER_RC_OK; //the message is OK problem must be somewhere else } @@ -1882,27 +1887,27 @@ static inline bool buffered_reader_read_timeout(struct buffered_reader *reader, return buffered_reader_read(reader, fd); else if(fds[0].revents & POLLERR) { - error("PARSER: read failed: POLLERR."); + netdata_log_error("PARSER: read failed: POLLERR."); return false; } else if(fds[0].revents & POLLHUP) { - error("PARSER: read failed: POLLHUP."); + netdata_log_error("PARSER: read failed: POLLHUP."); return false; } else if(fds[0].revents & POLLNVAL) { - error("PARSER: read failed: POLLNVAL."); + netdata_log_error("PARSER: read failed: POLLNVAL."); return false; } - error("PARSER: poll() returned positive number, but POLLIN|POLLERR|POLLHUP|POLLNVAL are not set."); + netdata_log_error("PARSER: poll() returned positive number, but POLLIN|POLLERR|POLLHUP|POLLNVAL are not set."); return false; } else if (ret == 0) { - error("PARSER: timeout while waiting for data."); + netdata_log_error("PARSER: timeout while waiting for data."); return false; } - error("PARSER: poll() failed with code %d.", ret); + netdata_log_error("PARSER: poll() failed with code %d.", ret); return false; } @@ -1927,13 +1932,13 @@ inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugi } if (unlikely(fileno(fp_plugin_input) == -1)) { - error("input file descriptor given is not a valid stream"); + netdata_log_error("input file descriptor given is not a valid stream"); 
cd->serial_failures++; return 0; } if (unlikely(fileno(fp_plugin_output) == -1)) { - error("output file descriptor given is not a valid stream"); + netdata_log_error("output file descriptor given is not a valid stream"); cd->serial_failures++; return 0; } diff --git a/collectors/plugins.d/pluginsd_parser.h b/collectors/plugins.d/pluginsd_parser.h index cfbb20dee3..5e1ea12424 100644 --- a/collectors/plugins.d/pluginsd_parser.h +++ b/collectors/plugins.d/pluginsd_parser.h @@ -209,8 +209,8 @@ static inline int parser_action(PARSER *parser, char *input) { buffer_fast_strcat(wb, "\"", 1); } - error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)", - command, parser->line, buffer_tostring(wb)); + netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)", + command, parser->line, buffer_tostring(wb)); buffer_free(wb); } diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c index 83f3fa922b..3360a4be31 100644 --- a/collectors/statsd.plugin/statsd.c +++ b/collectors/statsd.plugin/statsd.c @@ -571,7 +571,7 @@ static inline void statsd_process_set(STATSD_METRIC *m, const char *value) { if(!is_metric_useful_for_collection(m)) return; if(unlikely(!value || !*value)) { - error("STATSD: metric of type set, with empty value is ignored."); + netdata_log_error("STATSD: metric of type set, with empty value is ignored."); return; } @@ -606,7 +606,7 @@ static inline void statsd_process_dictionary(STATSD_METRIC *m, const char *value if(!is_metric_useful_for_collection(m)) return; if(unlikely(!value || !*value)) { - error("STATSD: metric of type set, with empty value is ignored."); + netdata_log_error("STATSD: metric of type set, with empty value is ignored."); return; } @@ -720,7 +720,7 @@ static void statsd_process_metric(const char *name, const char *value, const cha } else { statsd.unknown_types++; - error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type); + netdata_log_error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type); } if(m && tags && *tags) { @@ -892,14 +892,14 @@ static void statsd_del_callback(POLLINFO *pi) { if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) { if(t->len != 0) { statsd.socket_errors++; - error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. Trying to process it.", t->buffer); + netdata_log_error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. 
Trying to process it.", t->buffer); statsd_process(t->buffer, t->len, 0); } statsd.tcp_socket_disconnects++; statsd.tcp_socket_connected--; } else - error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP); + netdata_log_error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP); freez(t); } @@ -920,7 +920,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { case SOCK_STREAM: { struct statsd_tcp *d = (struct statsd_tcp *)pi->data; if(unlikely(!d)) { - error("STATSD: internal error: expected TCP data pointer is NULL"); + netdata_log_error("STATSD: internal error: expected TCP data pointer is NULL"); statsd.socket_errors++; retval = -1; goto cleanup; @@ -928,7 +928,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { #ifdef NETDATA_INTERNAL_CHECKS if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) { - error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type); + netdata_log_error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type); statsd.socket_errors++; retval = -1; goto cleanup; @@ -942,7 +942,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { if (rc < 0) { // read failed if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { - error("STATSD: recv() on TCP socket %d failed.", fd); + netdata_log_error("STATSD: recv() on TCP socket %d failed.", fd); statsd.socket_errors++; ret = -1; } @@ -976,7 +976,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { case SOCK_DGRAM: { struct statsd_udp *d = (struct statsd_udp *)pi->data; if(unlikely(!d)) { - error("STATSD: internal error: expected UDP data pointer is NULL"); + netdata_log_error("STATSD: internal error: expected UDP data pointer is NULL"); statsd.socket_errors++; retval = -1; goto cleanup; @@ -984,7 +984,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { #ifdef NETDATA_INTERNAL_CHECKS if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) { - error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP); + netdata_log_error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP); statsd.socket_errors++; retval = -1; goto cleanup; @@ -998,7 +998,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { if (rc < 0) { // read failed if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { - error("STATSD: recvmmsg() on UDP socket %d failed.", fd); + netdata_log_error("STATSD: recvmmsg() on UDP socket %d failed.", fd); statsd.socket_errors++; retval = -1; goto cleanup; @@ -1024,7 +1024,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { if (rc < 0) { // read failed if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) { - error("STATSD: recv() on UDP socket %d failed.", fd); + netdata_log_error("STATSD: recv() on UDP socket %d failed.", fd); statsd.socket_errors++; retval = -1; goto cleanup; @@ -1043,7 +1043,7 @@ static int statsd_rcv_callback(POLLINFO *pi, short int *events) { } default: { - error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd); + netdata_log_error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd); statsd.socket_errors++; retval = 
-1; goto cleanup; @@ -1061,7 +1061,7 @@ static int statsd_snd_callback(POLLINFO *pi, short int *events) { (void)events; worker_is_busy(WORKER_JOB_TYPE_SND_DATA); - error("STATSD: snd_callback() called, but we never requested to send data to statsd clients."); + netdata_log_error("STATSD: snd_callback() called, but we never requested to send data to statsd clients."); worker_is_idle(); return -1; @@ -1169,7 +1169,7 @@ static STATSD_APP_CHART_DIM_VALUE_TYPE string2valuetype(const char *type, size_t else if(!strcmp(type, "stddev")) return STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV; else if(!strcmp(type, "percentile")) return STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE; - error("STATSD: invalid type '%s' at line %zu of file '%s'. Using 'last'.", type, line, filename); + netdata_log_error("STATSD: invalid type '%s' at line %zu of file '%s'. Using 'last'.", type, line, filename); return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST; } @@ -1244,7 +1244,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA FILE *fp = fopen(filename, "r"); if(!fp) { - error("STATSD: cannot open file '%s'.", filename); + netdata_log_error("STATSD: cannot open file '%s'.", filename); freez(buffer); return -1; } @@ -1281,7 +1281,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA freez(tmp); } else - error("STATSD: ignoring line %zu of file '%s', include filename is empty", line, filename); + netdata_log_error("STATSD: ignoring line %zu of file '%s', include filename is empty", line, filename); continue; } @@ -1348,20 +1348,20 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA } } else - error("STATSD: ignoring line %zu ('%s') of file '%s', [app] is not defined.", line, s, filename); + netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s', [app] is not defined.", line, s, filename); continue; } if(!app) { - error("STATSD: ignoring line %zu ('%s') of file '%s', it is outside all sections.", line, s, filename); + netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s', it is outside all sections.", line, s, filename); continue; } char *name = s; char *value = strchr(s, '='); if(!value) { - error("STATSD: ignoring line %zu ('%s') of file '%s', there is no = in it.", line, s, filename); + netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s', there is no = in it.", line, s, filename); continue; } *value = '\0'; @@ -1371,7 +1371,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA value = trim(value); if(!name || *name == '#') { - error("STATSD: ignoring line %zu of file '%s', name is empty.", line, filename); + netdata_log_error("STATSD: ignoring line %zu of file '%s', name is empty.", line, filename); continue; } if(!value) { @@ -1418,7 +1418,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA app->rrd_history_entries = 5; } else { - error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [app] section.", line, name, filename); + netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [app] section.", line, name, filename); continue; } } @@ -1512,7 +1512,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT, true); } else { - error("STATSD: ignoring line %zu ('%s') of file '%s'. 
Unknown keyword for the [%s] section.", line, name, filename, chart->id); + netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [%s] section.", line, name, filename, chart->id); continue; } } @@ -2049,7 +2049,7 @@ static inline void link_metric_to_app_dimension(STATSD_APP *app, STATSD_METRIC * } else { if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST) - error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name); + netdata_log_error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name); dim->value_ptr = &m->last; dim->algorithm = statsd_algorithm_for_metric(m); diff --git a/collectors/timex.plugin/plugin_timex.c b/collectors/timex.plugin/plugin_timex.c index d6d7e6f8ca..025b699a17 100644 --- a/collectors/timex.plugin/plugin_timex.c +++ b/collectors/timex.plugin/plugin_timex.c @@ -79,7 +79,7 @@ void *timex_main(void *ptr) prev_sync_state = sync_state; if (non_seq_failure) { - error("Cannot get clock synchronization state"); + netdata_log_error("Cannot get clock synchronization state"); continue; } diff --git a/collectors/xenstat.plugin/xenstat_plugin.c b/collectors/xenstat.plugin/xenstat_plugin.c index bcd5220e66..acd072605f 100644 --- a/collectors/xenstat.plugin/xenstat_plugin.c +++ b/collectors/xenstat.plugin/xenstat_plugin.c @@ -178,7 +178,7 @@ static struct domain_metrics *domain_metrics_free(struct domain_metrics *d) { } if(unlikely(!cur)) { - error("XENSTAT: failed to free domain metrics."); + netdata_log_error("XENSTAT: failed to free domain metrics."); return NULL; } @@ -242,7 +242,7 @@ static int vcpu_metrics_collect(struct domain_metrics *d, xenstat_domain *domain vcpu = xenstat_domain_vcpu(domain, i); if(unlikely(!vcpu)) { - error("XENSTAT: cannot get VCPU statistics."); + netdata_log_error("XENSTAT: cannot get VCPU statistics."); return 1; } @@ -288,7 +288,7 @@ static int vbd_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) vbd = xenstat_domain_vbd(domain, i); if(unlikely(!vbd)) { - error("XENSTAT: cannot get VBD statistics."); + netdata_log_error("XENSTAT: cannot get VBD statistics."); return 1; } @@ -336,7 +336,7 @@ static int network_metrics_collect(struct domain_metrics *d, xenstat_domain *dom network = xenstat_domain_network(domain, i); if(unlikely(!network)) { - error("XENSTAT: cannot get network statistics."); + netdata_log_error("XENSTAT: cannot get network statistics."); return 1; } @@ -368,7 +368,7 @@ static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominf xenstat_node *node = xenstat_get_node(xhandle, XENSTAT_ALL); if (unlikely(!node)) { - error("XENSTAT: failed to retrieve statistics from libxenstat."); + netdata_log_error("XENSTAT: failed to retrieve statistics from libxenstat."); return 1; } @@ -388,7 +388,7 @@ static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominf // get domain UUID unsigned int id = xenstat_domain_id(domain); if(unlikely(libxl_domain_info(ctx, info, id))) { - error("XENSTAT: cannot get domain info."); + netdata_log_error("XENSTAT: cannot get domain info."); } else { snprintfz(uuid, LIBXL_UUID_FMTLEN, LIBXL_UUID_FMT "\n", LIBXL_UUID_BYTES(info->uuid)); @@ -989,7 +989,7 @@ int main(int argc, char **argv) { exit(1); } - error("xenstat.plugin: ignoring parameter '%s'", argv[i]); + netdata_log_error("xenstat.plugin: ignoring parameter '%s'", argv[i]); } errno = 0; @@ -997,7 
+997,7 @@ int main(int argc, char **argv) { if(freq >= netdata_update_every) netdata_update_every = freq; else if(freq) - error("update frequency %d seconds is too small for XENSTAT. Using %d.", freq, netdata_update_every); + netdata_log_error("update frequency %d seconds is too small for XENSTAT. Using %d.", freq, netdata_update_every); // ------------------------------------------------------------------------ // initialize xen API handles @@ -1008,13 +1008,13 @@ int main(int argc, char **argv) { if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_init()\n"); xhandle = xenstat_init(); if (xhandle == NULL) { - error("XENSTAT: failed to initialize xenstat library."); + netdata_log_error("XENSTAT: failed to initialize xenstat library."); return 1; } if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling libxl_ctx_alloc()\n"); if (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, NULL)) { - error("XENSTAT: failed to initialize xl context."); + netdata_log_error("XENSTAT: failed to initialize xl context."); xenstat_uninit(xhandle); return 1; } diff --git a/daemon/analytics.c b/daemon/analytics.c index 1dd337ff35..fb23589535 100644 --- a/daemon/analytics.c +++ b/daemon/analytics.c @@ -1035,11 +1035,11 @@ void send_statistics(const char *action, const char *action_result, const char * char *s = fgets(buffer, 4, fp_child_output); int exit_code = netdata_pclose(fp_child_input, fp_child_output, command_pid); if (exit_code) - error("Execution of anonymous statistics script returned %d.", exit_code); + netdata_log_error("Execution of anonymous statistics script returned %d.", exit_code); if (s && strncmp(buffer, "200", 3)) - error("Execution of anonymous statistics script returned http code %s.", buffer); + netdata_log_error("Execution of anonymous statistics script returned http code %s.", buffer); } else { - error("Failed to run anonymous statistics script %s.", as_script); + netdata_log_error("Failed to run anonymous statistics script %s.", as_script); } freez(command_to_run); } diff --git a/daemon/commands.c b/daemon/commands.c index 9a5c712021..84298416e6 100644 --- a/daemon/commands.c +++ b/daemon/commands.c @@ -251,8 +251,10 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message) char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL); if (value == NULL) { - error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set", conf_file, - temp + offset + 1, temp + offset2 + 1); + netdata_log_error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set", + conf_file, + temp + offset + 1, + temp + offset2 + 1); freez(temp); return CMD_STATUS_FAILURE; } @@ -449,7 +451,7 @@ static void send_command_reply(struct command_context *cmd_ctx, cmd_status_t sta write_buf.len = reply_string_size; ret = uv_write(&cmd_ctx->write_req, (uv_stream_t *)client, &write_buf, 1, pipe_write_cb); if (ret) { - error("uv_write(): %s", uv_strerror(ret)); + netdata_log_error("uv_write(): %s", uv_strerror(ret)); } } @@ -535,7 +537,7 @@ static void pipe_read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf netdata_log_info("EOF found in command pipe."); parse_commands(cmd_ctx); } else if (nread < 0) { - error("%s: %s", __func__, uv_strerror(nread)); + netdata_log_error("%s: %s", __func__, uv_strerror(nread)); } if (nread < 0) { /* stop stream due to EOF or error */ @@ -579,13 +581,13 @@ static void connection_cb(uv_stream_t *server, int status) client = (uv_pipe_t *)cmd_ctx; ret = 
uv_pipe_init(server->loop, client, 1); if (ret) { - error("uv_pipe_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_pipe_init(): %s", uv_strerror(ret)); freez(cmd_ctx); return; } ret = uv_accept(server, (uv_stream_t *)client); if (ret) { - error("uv_accept(): %s", uv_strerror(ret)); + netdata_log_error("uv_accept(): %s", uv_strerror(ret)); uv_close((uv_handle_t *)client, pipe_close_cb); return; } @@ -598,7 +600,7 @@ static void connection_cb(uv_stream_t *server, int status) ret = uv_read_start((uv_stream_t*)client, alloc_cb, pipe_read_cb); if (ret) { - error("uv_read_start(): %s", uv_strerror(ret)); + netdata_log_error("uv_read_start(): %s", uv_strerror(ret)); uv_close((uv_handle_t *)client, pipe_close_cb); --clients; netdata_log_info("Command Clients = %u\n", clients); @@ -620,7 +622,7 @@ static void command_thread(void *arg) loop = mallocz(sizeof(uv_loop_t)); ret = uv_loop_init(loop); if (ret) { - error("uv_loop_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_loop_init(): %s", uv_strerror(ret)); command_thread_error = ret; goto error_after_loop_init; } @@ -628,7 +630,7 @@ static void command_thread(void *arg) ret = uv_async_init(loop, &async, async_cb); if (ret) { - error("uv_async_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_async_init(): %s", uv_strerror(ret)); command_thread_error = ret; goto error_after_async_init; } @@ -636,7 +638,7 @@ static void command_thread(void *arg) ret = uv_pipe_init(loop, &server_pipe, 0); if (ret) { - error("uv_pipe_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_pipe_init(): %s", uv_strerror(ret)); command_thread_error = ret; goto error_after_pipe_init; } @@ -647,7 +649,7 @@ static void command_thread(void *arg) uv_fs_req_cleanup(&req); ret = uv_pipe_bind(&server_pipe, pipename); if (ret) { - error("uv_pipe_bind(): %s", uv_strerror(ret)); + netdata_log_error("uv_pipe_bind(): %s", uv_strerror(ret)); command_thread_error = ret; goto error_after_pipe_bind; } @@ -659,7 +661,7 @@ static void command_thread(void *arg) ret = uv_listen((uv_stream_t *)&server_pipe, 1, connection_cb); } if (ret) { - error("uv_listen(): %s", uv_strerror(ret)); + netdata_log_error("uv_listen(): %s", uv_strerror(ret)); command_thread_error = ret; goto error_after_uv_listen; } @@ -723,7 +725,7 @@ void commands_init(void) completion_init(&completion); error = uv_thread_create(&thread, command_thread, NULL); if (error) { - error("uv_thread_create(): %s", uv_strerror(error)); + netdata_log_error("uv_thread_create(): %s", uv_strerror(error)); goto after_error; } /* wait for worker thread to initialize */ @@ -734,7 +736,7 @@ void commands_init(void) if (command_thread_error) { error = uv_thread_join(&thread); if (error) { - error("uv_thread_create(): %s", uv_strerror(error)); + netdata_log_error("uv_thread_create(): %s", uv_strerror(error)); } goto after_error; } @@ -743,7 +745,7 @@ void commands_init(void) return; after_error: - error("Failed to initialize command server. The netdata cli tool will be unable to send commands."); + netdata_log_error("Failed to initialize command server. 
The netdata cli tool will be unable to send commands."); } void commands_exit(void) diff --git a/daemon/daemon.c b/daemon/daemon.c index e3287a70ac..9a0194ffb3 100644 --- a/daemon/daemon.c +++ b/daemon/daemon.c @@ -20,7 +20,7 @@ void get_netdata_execution_path(void) exepath_size = sizeof(exepath) - 1; ret = uv_exepath(exepath, &exepath_size); if (0 != ret) { - error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", exepath, (unsigned)exepath_size, user, + netdata_log_error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", exepath, (unsigned)exepath_size, user, uv_strerror(ret)); fatal("Cannot start netdata without getting execution path."); } @@ -33,13 +33,13 @@ static void chown_open_file(int fd, uid_t uid, gid_t gid) { struct stat buf; if(fstat(fd, &buf) == -1) { - error("Cannot fstat() fd %d", fd); + netdata_log_error("Cannot fstat() fd %d", fd); return; } if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { if(fchown(fd, uid, gid) == -1) - error("Cannot fchown() fd %d.", fd); + netdata_log_error("Cannot fchown() fd %d.", fd); } } @@ -60,7 +60,7 @@ static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t (void) snprintfz(filename, FILENAME_MAX, "%s/%s", dirname, de->d_name); if (de->d_type == DT_REG || recursive) { if (chown(filename, uid, gid) == -1) - error("Cannot chown %s '%s' to %u:%u", de->d_type == DT_DIR ? "directory" : "file", filename, (unsigned int)uid, (unsigned int)gid); + netdata_log_error("Cannot chown %s '%s' to %u:%u", de->d_type == DT_DIR ? "directory" : "file", filename, (unsigned int)uid, (unsigned int)gid); } if (de->d_type == DT_DIR && recursive) @@ -73,7 +73,7 @@ static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool recursive) { if (chown(dir, uid, gid) == -1) - error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid); + netdata_log_error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid); fix_directory_file_permissions(dir, uid, gid, recursive); } @@ -89,7 +89,7 @@ void clean_directory(char *dirname) while((de = readdir(dir))) if(de->d_type == DT_REG) if (unlinkat(dir_fd, de->d_name, 0)) - error("Cannot delete %s/%s", dirname, de->d_name); + netdata_log_error("Cannot delete %s/%s", dirname, de->d_name); closedir(dir); } @@ -113,7 +113,7 @@ int become_user(const char *username, int pid_fd) { struct passwd *pw = getpwnam(username); if(!pw) { - error("User %s is not present.", username); + netdata_log_error("User %s is not present.", username); return -1; } @@ -127,7 +127,7 @@ int become_user(const char *username, int pid_fd) { if(pidfile[0]) { if(chown(pidfile, uid, gid) == -1) - error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid); + netdata_log_error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid); } int ngroups = (int)sysconf(_SC_NGROUPS_MAX); @@ -140,7 +140,7 @@ int become_user(const char *username, int pid_fd) { if(getgrouplist(username, gid, supplementary_groups, &ngroups) == -1) { #endif /* __APPLE__ */ if(am_i_root) - error("Cannot get supplementary groups of user '%s'.", username); + netdata_log_error("Cannot get supplementary groups of user '%s'.", username); ngroups = 0; } @@ -154,7 +154,7 @@ int become_user(const char *username, int pid_fd) { if(supplementary_groups && ngroups > 0) { if(setgroups((size_t)ngroups, supplementary_groups) == -1) { if(am_i_root) - error("Cannot set supplementary 
groups for user '%s'", username); + netdata_log_error("Cannot set supplementary groups for user '%s'", username); } ngroups = 0; } @@ -167,7 +167,7 @@ int become_user(const char *username, int pid_fd) { #else if(setresgid(gid, gid, gid) != 0) { #endif /* __APPLE__ */ - error("Cannot switch to user's %s group (gid: %u).", username, gid); + netdata_log_error("Cannot switch to user's %s group (gid: %u).", username, gid); return -1; } @@ -176,24 +176,24 @@ int become_user(const char *username, int pid_fd) { #else if(setresuid(uid, uid, uid) != 0) { #endif /* __APPLE__ */ - error("Cannot switch to user %s (uid: %u).", username, uid); + netdata_log_error("Cannot switch to user %s (uid: %u).", username, uid); return -1; } if(setgid(gid) != 0) { - error("Cannot switch to user's %s group (gid: %u).", username, gid); + netdata_log_error("Cannot switch to user's %s group (gid: %u).", username, gid); return -1; } if(setegid(gid) != 0) { - error("Cannot effectively switch to user's %s group (gid: %u).", username, gid); + netdata_log_error("Cannot effectively switch to user's %s group (gid: %u).", username, gid); return -1; } if(setuid(uid) != 0) { - error("Cannot switch to user %s (uid: %u).", username, uid); + netdata_log_error("Cannot switch to user %s (uid: %u).", username, uid); return -1; } if(seteuid(uid) != 0) { - error("Cannot effectively switch to user %s (uid: %u).", username, uid); + netdata_log_error("Cannot effectively switch to user %s (uid: %u).", username, uid); return -1; } @@ -213,7 +213,7 @@ static void oom_score_adj(void) { // read the existing score if(read_single_signed_number_file("/proc/self/oom_score_adj", &old_score)) { - error("Out-Of-Memory (OOM) score setting is not supported on this system."); + netdata_log_error("Out-Of-Memory (OOM) score setting is not supported on this system."); return; } @@ -243,12 +243,12 @@ static void oom_score_adj(void) { } if(wanted_score < OOM_SCORE_ADJ_MIN) { - error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN); + netdata_log_error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN); wanted_score = OOM_SCORE_ADJ_MIN; } if(wanted_score > OOM_SCORE_ADJ_MAX) { - error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX); + netdata_log_error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX); wanted_score = OOM_SCORE_ADJ_MAX; } @@ -267,24 +267,25 @@ static void oom_score_adj(void) { if(written) { if(read_single_signed_number_file("/proc/self/oom_score_adj", &final_score)) - error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score); + netdata_log_error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score); else if(final_score == wanted_score) netdata_log_info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score); else - error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score); + netdata_log_error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score); analytics_report_oom_score(final_score); } else - error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. 
(systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score); + netdata_log_error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score); } else - error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing."); + netdata_log_error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing."); } static void process_nice_level(void) { #ifdef HAVE_NICE int nice_level = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process nice level", 19); - if(nice(nice_level) == -1) error("Cannot set netdata CPU nice level to %d.", nice_level); + if(nice(nice_level) == -1) + netdata_log_error("Cannot set netdata CPU nice level to %d.", nice_level); else debug(D_SYSTEM, "Set netdata nice level to %d.", nice_level); #endif // HAVE_NICE }; @@ -341,7 +342,7 @@ struct sched_def { static void sched_getscheduler_report(void) { int sched = sched_getscheduler(0); if(sched == -1) { - error("Cannot get my current process scheduling policy."); + netdata_log_error("Cannot get my current process scheduling policy."); return; } else { @@ -351,7 +352,7 @@ static void sched_getscheduler_report(void) { if(scheduler_defaults[i].flags & SCHED_FLAG_PRIORITY_CONFIGURABLE) { struct sched_param param; if(sched_getparam(0, &param) == -1) { - error("Cannot get the process scheduling priority for my policy '%s'", scheduler_defaults[i].name); + netdata_log_error("Cannot get the process scheduling priority for my policy '%s'", scheduler_defaults[i].name); return; } else { @@ -406,14 +407,14 @@ static void sched_setscheduler_set(void) { #ifdef HAVE_SCHED_GET_PRIORITY_MIN errno = 0; if(priority < sched_get_priority_min(policy)) { - error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy)); + netdata_log_error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy)); priority = sched_get_priority_min(policy); } #endif #ifdef HAVE_SCHED_GET_PRIORITY_MAX errno = 0; if(priority > sched_get_priority_max(policy)) { - error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy)); + netdata_log_error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy)); priority = sched_get_priority_max(policy); } #endif @@ -422,7 +423,7 @@ static void sched_setscheduler_set(void) { } if(!found) { - error("Unknown scheduling policy '%s' - falling back to nice", name); + netdata_log_error("Unknown scheduling policy '%s' - falling back to nice", name); goto fallback; } @@ -433,7 +434,10 @@ static void sched_setscheduler_set(void) { errno = 0; i = sched_setscheduler(0, policy, &param); if(i != 0) { - error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. Falling back to nice.", name, policy, priority); + netdata_log_error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. 
Falling back to nice.", + name, + policy, + priority); } else { netdata_log_info("Adjusted netdata scheduling policy to %s (%d), with priority %d.", name, policy, priority); @@ -489,15 +493,16 @@ int become_daemon(int dont_fork, const char *user) pidfd = open(pidfile, O_WRONLY | O_CREAT, 0644); if(pidfd >= 0) { if(ftruncate(pidfd, 0) != 0) - error("Cannot truncate pidfile '%s'.", pidfile); + netdata_log_error("Cannot truncate pidfile '%s'.", pidfile); char b[100]; sprintf(b, "%d\n", getpid()); ssize_t i = write(pidfd, b, strlen(b)); if(i <= 0) - error("Cannot write pidfile '%s'.", pidfile); + netdata_log_error("Cannot write pidfile '%s'.", pidfile); } - else error("Failed to open pidfile '%s'.", pidfile); + else + netdata_log_error("Failed to open pidfile '%s'.", pidfile); } // Set new file permissions @@ -514,7 +519,7 @@ int become_daemon(int dont_fork, const char *user) if(user && *user) { if(become_user(user, pidfd) != 0) { - error("Cannot become user '%s'. Continuing as we are.", user); + netdata_log_error("Cannot become user '%s'. Continuing as we are.", user); } else debug(D_SYSTEM, "Successfully became user '%s'.", user); } diff --git a/daemon/main.c b/daemon/main.c index d38a9bc354..9bfaddc4e5 100644 --- a/daemon/main.c +++ b/daemon/main.c @@ -479,7 +479,7 @@ void netdata_cleanup_and_exit(int ret) { delta_shutdown_time("remove pid file"); if(unlink(pidfile) != 0) - error("EXIT: cannot unlink pidfile '%s'.", pidfile); + netdata_log_error("EXIT: cannot unlink pidfile '%s'.", pidfile); } #ifdef ENABLE_HTTPS @@ -518,7 +518,7 @@ int make_dns_decision(const char *section_name, const char *config_name, const c if(!strcmp("no",value)) return 0; if(strcmp("heuristic",value)) - error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'", + netdata_log_error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'", value, section_name, config_name); return simple_pattern_is_potential_name(p); @@ -592,17 +592,17 @@ void web_server_config_options(void) else if(!strcmp(s, "fixed")) web_gzip_strategy = Z_FIXED; else { - error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s); + netdata_log_error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s); web_gzip_strategy = Z_DEFAULT_STRATEGY; } web_gzip_level = (int)config_get_number(CONFIG_SECTION_WEB, "gzip compression level", 3); if(web_gzip_level < 1) { - error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 1 (fastest compression).", web_gzip_level); + netdata_log_error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 1 (fastest compression).", web_gzip_level); web_gzip_level = 1; } else if(web_gzip_level > 9) { - error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 9 (best compression).", web_gzip_level); + netdata_log_error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). 
Proceeding with level 9 (best compression).", web_gzip_level); web_gzip_level = 9; } } @@ -622,11 +622,11 @@ int killpid(pid_t pid) { return ret; case EPERM: - error("Cannot kill pid %d, but I do not have enough permissions.", pid); + netdata_log_error("Cannot kill pid %d, but I do not have enough permissions.", pid); break; default: - error("Cannot kill pid %d, but I received an error.", pid); + netdata_log_error("Cannot kill pid %d, but I received an error.", pid); break; } } @@ -637,7 +637,7 @@ int killpid(pid_t pid) { static void set_nofile_limit(struct rlimit *rl) { // get the num files allowed if(getrlimit(RLIMIT_NOFILE, rl) != 0) { - error("getrlimit(RLIMIT_NOFILE) failed"); + netdata_log_error("getrlimit(RLIMIT_NOFILE) failed"); return; } @@ -647,17 +647,17 @@ static void set_nofile_limit(struct rlimit *rl) { // make the soft/hard limits equal rl->rlim_cur = rl->rlim_max; if (setrlimit(RLIMIT_NOFILE, rl) != 0) { - error("setrlimit(RLIMIT_NOFILE, { %zu, %zu }) failed", (size_t)rl->rlim_cur, (size_t)rl->rlim_max); + netdata_log_error("setrlimit(RLIMIT_NOFILE, { %zu, %zu }) failed", (size_t)rl->rlim_cur, (size_t)rl->rlim_max); } // sanity check to make sure we have enough file descriptors available to open if (getrlimit(RLIMIT_NOFILE, rl) != 0) { - error("getrlimit(RLIMIT_NOFILE) failed"); + netdata_log_error("getrlimit(RLIMIT_NOFILE) failed"); return; } if (rl->rlim_cur < 1024) - error("Number of open file descriptors allowed for this process is too low (RLIMIT_NOFILE=%zu)", (size_t)rl->rlim_cur); + netdata_log_error("Number of open file descriptors allowed for this process is too low (RLIMIT_NOFILE=%zu)", (size_t)rl->rlim_cur); } void cancel_main_threads() { @@ -694,7 +694,7 @@ void cancel_main_threads() { if(found) { for (i = 0; static_threads[i].name != NULL ; i++) { if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED) - error("Main thread %s takes too long to exit. Giving up...", static_threads[i].name); + netdata_log_error("Main thread %s takes too long to exit. Giving up...", static_threads[i].name); } } else @@ -1056,7 +1056,7 @@ static void get_netdata_configured_variables() { char buf[HOSTNAME_MAX + 1]; if(gethostname(buf, HOSTNAME_MAX) == -1){ - error("Cannot get machine hostname."); + netdata_log_error("Cannot get machine hostname."); } netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf); @@ -1067,7 +1067,7 @@ static void get_netdata_configured_variables() { default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_DB, "update every", UPDATE_EVERY); if(default_rrd_update_every < 1 || default_rrd_update_every > 600) { - error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY); + netdata_log_error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY); default_rrd_update_every = UPDATE_EVERY; config_set_number(CONFIG_SECTION_DB, "update every", default_rrd_update_every); } @@ -1079,7 +1079,7 @@ static void get_netdata_configured_variables() { const char *mode = config_get(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode)); default_rrd_memory_mode = rrd_memory_mode_id(mode); if(strcmp(mode, rrd_memory_mode_name(default_rrd_memory_mode)) != 0) { - error("Invalid memory mode '%s' given. Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode)); + netdata_log_error("Invalid memory mode '%s' given. 
Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode)); config_set(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode)); } } @@ -1130,7 +1130,7 @@ static void get_netdata_configured_variables() { default_rrdeng_extent_cache_mb = 0; if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) { - error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB); + netdata_log_error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB); default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB; config_set_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb); } @@ -1140,14 +1140,14 @@ static void get_netdata_configured_variables() { default_rrdeng_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine disk space MB", default_rrdeng_disk_quota_mb); if(default_rrdeng_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) { - error("Invalid dbengine disk space %d given. Defaulting to %d.", default_rrdeng_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB); + netdata_log_error("Invalid dbengine disk space %d given. Defaulting to %d.", default_rrdeng_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB); default_rrdeng_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB; config_set_number(CONFIG_SECTION_DB, "dbengine disk space MB", default_rrdeng_disk_quota_mb); } default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine multihost disk space MB", compute_multidb_diskspace()); if(default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) { - error("Invalid multidb disk space %d given. Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb); + netdata_log_error("Invalid multidb disk space %d given. 
Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb); default_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb; config_set_number(CONFIG_SECTION_DB, "dbengine multihost disk space MB", default_multidb_disk_quota_mb); } @@ -1229,7 +1229,7 @@ static bool load_netdata_conf(char *filename, char overwrite_used, char **user) if(filename && *filename) { ret = config_load(filename, overwrite_used, NULL); if(!ret) - error("CONFIG: cannot load config file '%s'.", filename); + netdata_log_error("CONFIG: cannot load config file '%s'.", filename); } else { filename = strdupz_path_subpath(netdata_configured_user_config_dir, "netdata.conf"); @@ -1263,7 +1263,7 @@ int get_system_info(struct rrdhost_system_info *system_info, bool log) { script = mallocz(sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("system-info.sh") + 2)); sprintf(script, "%s/%s", netdata_configured_primary_plugins_dir, "system-info.sh"); if (unlikely(access(script, R_OK) != 0)) { - error("System info script %s not found.",script); + netdata_log_error("System info script %s not found.",script); freez(script); return 1; } @@ -1289,7 +1289,7 @@ int get_system_info(struct rrdhost_system_info *system_info, bool log) { coverity_remove_taint(value); if(unlikely(rrdhost_set_system_info_variable(system_info, line, value))) { - error("Unexpected environment variable %s=%s", line, value); + netdata_log_error("Unexpected environment variable %s=%s", line, value); } else { if(log) @@ -1341,7 +1341,7 @@ int main(int argc, char **argv) { usec_t started_ut = now_monotonic_usec(); usec_t last_ut = started_ut; const char *prev_msg = NULL; - // Initialize stderror avoiding coredump when netdata_log_info() or error() is called + // Initialize stderror avoiding coredump when netdata_log_info() or netdata_log_error() is called stderror = stderr; int i; @@ -1386,7 +1386,7 @@ int main(int argc, char **argv) { switch(opt) { case 'c': if(!load_netdata_conf(optarg, 1, &user)) { - error("Cannot load configuration file %s.", optarg); + netdata_log_error("Cannot load configuration file %s.", optarg); return 1; } else { @@ -1881,7 +1881,7 @@ int main(int argc, char **argv) { if(debug_flags != 0) { struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; if(setrlimit(RLIMIT_CORE, &rl) != 0) - error("Cannot request unlimited core dumps for debugging... Proceeding anyway..."); + netdata_log_error("Cannot request unlimited core dumps for debugging... Proceeding anyway..."); #ifdef HAVE_SYS_PRCTL_H prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); @@ -1985,7 +1985,7 @@ int main(int argc, char **argv) { if(debug_flags != 0) { struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; if(setrlimit(RLIMIT_CORE, &rl) != 0) - error("Cannot request unlimited core dumps for debugging... Proceeding anyway..."); + netdata_log_error("Cannot request unlimited core dumps for debugging... 
Proceeding anyway..."); #ifdef HAVE_SYS_PRCTL_H prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); #endif @@ -2131,14 +2131,14 @@ int main(int argc, char **argv) { // ------------------------------------------------------------------------ // Report ACLK build failure #ifndef ENABLE_ACLK - error("This agent doesn't have ACLK."); + netdata_log_error("This agent doesn't have ACLK."); char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/.aclk_report_sent", netdata_configured_varlib_dir); if (netdata_anonymous_statistics_enabled > 0 && access(filename, F_OK)) { // -1 -> not initialized send_statistics("ACLK_DISABLED", "-", "-"); int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 444); if (fd == -1) - error("Cannot create file '%s'. Please fix this.", filename); + netdata_log_error("Cannot create file '%s'. Please fix this.", filename); else close(fd); } diff --git a/daemon/service.c b/daemon/service.c index cab771e439..0db2b7508e 100644 --- a/daemon/service.c +++ b/daemon/service.c @@ -42,7 +42,7 @@ static void svc_rrddim_obsolete_to_archive(RRDDIM *rd) { if(cache_filename) { netdata_log_info("Deleting dimension file '%s'.", cache_filename); if (unlikely(unlink(cache_filename) == -1)) - error("Cannot delete dimension file '%s'", cache_filename); + netdata_log_error("Cannot delete dimension file '%s'", cache_filename); } if (rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { diff --git a/daemon/signals.c b/daemon/signals.c index 4ef8124140..4bbcd440ea 100644 --- a/daemon/signals.c +++ b/daemon/signals.c @@ -59,7 +59,7 @@ void signals_block(void) { sigfillset(&sigset); if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1) - error("SIGNAL: Could not block signals for threads"); + netdata_log_error("SIGNAL: Could not block signals for threads"); } void signals_unblock(void) { @@ -67,7 +67,7 @@ void signals_unblock(void) { sigfillset(&sigset); if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) { - error("SIGNAL: Could not unblock signals for threads"); + netdata_log_error("SIGNAL: Could not unblock signals for threads"); } } @@ -91,7 +91,7 @@ void signals_init(void) { } if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1) - error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name); + netdata_log_error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name); } } @@ -104,7 +104,7 @@ void signals_restore_SIGCHLD(void) sa.sa_handler = signal_handler; if(sigaction(SIGCHLD, &sa, NULL) == -1) - error("SIGNAL: Failed to change signal handler for: SIGCHLD"); + netdata_log_error("SIGNAL: Failed to change signal handler for: SIGCHLD"); } void signals_reset(void) { @@ -116,7 +116,7 @@ void signals_reset(void) { int i; for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) { if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1) - error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name); + netdata_log_error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name); } } @@ -128,14 +128,14 @@ static void reap_child(pid_t pid) { debug(D_CHILDS, "SIGNAL: reap_child(%d)...", pid); if (netdata_waitid(P_PID, (id_t)pid, &i, WEXITED|WNOHANG) == -1) { if (errno != ECHILD) - error("SIGNAL: waitid(%d): failed to wait for child", pid); + netdata_log_error("SIGNAL: waitid(%d): failed to wait for child", pid); else netdata_log_info("SIGNAL: waitid(%d): failed - it seems the child is already reaped", pid); return; } else if (i.si_pid == 0) { // Process didn't exit, this shouldn't happen. 
- error("SIGNAL: waitid(%d): reports pid 0 - child has not exited", pid); + netdata_log_error("SIGNAL: waitid(%d): reports pid 0 - child has not exited", pid); return; } @@ -248,6 +248,6 @@ void signals_handle(void) { } } else - error("SIGNAL: pause() returned but it was not interrupted by a signal."); + netdata_log_error("SIGNAL: pause() returned but it was not interrupted by a signal."); } } diff --git a/database/contexts/api_v1.c b/database/contexts/api_v1.c index daf945eebf..b4bcfe4ae3 100644 --- a/database/contexts/api_v1.c +++ b/database/contexts/api_v1.c @@ -356,7 +356,7 @@ static inline int rrdcontext_to_json_callback(const DICTIONARY_ITEM *item, void int rrdcontext_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, RRDCONTEXT_TO_JSON_OPTIONS options, const char *context, SIMPLE_PATTERN *chart_label_key, SIMPLE_PATTERN *chart_labels_filter, SIMPLE_PATTERN *chart_dimensions) { if(!host->rrdctx.contexts) { - error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); + netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); return HTTP_RESP_NOT_FOUND; } @@ -393,7 +393,7 @@ int rrdcontext_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, R int rrdcontexts_to_json(RRDHOST *host, BUFFER *wb, time_t after, time_t before, RRDCONTEXT_TO_JSON_OPTIONS options, SIMPLE_PATTERN *chart_label_key, SIMPLE_PATTERN *chart_labels_filter, SIMPLE_PATTERN *chart_dimensions) { if(!host->rrdctx.contexts) { - error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); + netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host)); return HTTP_RESP_NOT_FOUND; } diff --git a/database/contexts/context.c b/database/contexts/context.c index 75c8d4d3c3..47946f1e05 100644 --- a/database/contexts/context.c +++ b/database/contexts/context.c @@ -33,7 +33,7 @@ static void rrdcontext_insert_callback(const DICTIONARY_ITEM *item __maybe_unuse // we are loading data from the SQL database if(rc->version) - error("RRDCONTEXT: context '%s' is already initialized with version %"PRIu64", but it is loaded again from SQL with version %"PRIu64"", string2str(rc->id), rc->version, rc->hub.version); + netdata_log_error("RRDCONTEXT: context '%s' is already initialized with version %"PRIu64", but it is loaded again from SQL with version %"PRIu64"", string2str(rc->id), rc->version, rc->hub.version); // IMPORTANT // replace all string pointers in rc->hub with our own versions diff --git a/database/contexts/instance.c b/database/contexts/instance.c index 665022afd6..7e572fb80a 100644 --- a/database/contexts/instance.c +++ b/database/contexts/instance.c @@ -407,13 +407,13 @@ inline void rrdinstance_from_rrdset(RRDSET *st) { #define rrdset_get_rrdinstance(st) rrdset_get_rrdinstance_with_trace(st, __FUNCTION__); static inline RRDINSTANCE *rrdset_get_rrdinstance_with_trace(RRDSET *st, const char *function) { if(unlikely(!st->rrdinstance)) { - error("RRDINSTANCE: RRDSET '%s' is not linked to an RRDINSTANCE at %s()", rrdset_id(st), function); + netdata_log_error("RRDINSTANCE: RRDSET '%s' is not linked to an RRDINSTANCE at %s()", rrdset_id(st), function); return NULL; } RRDINSTANCE *ri = rrdinstance_acquired_value(st->rrdinstance); if(unlikely(!ri)) { - error("RRDINSTANCE: RRDSET '%s' lost its link to an RRDINSTANCE at %s()", rrdset_id(st), function); + 
netdata_log_error("RRDINSTANCE: RRDSET '%s' lost its link to an RRDINSTANCE at %s()", rrdset_id(st), function); return NULL; } diff --git a/database/contexts/metric.c b/database/contexts/metric.c index 69839021a8..55efde4e9f 100644 --- a/database/contexts/metric.c +++ b/database/contexts/metric.c @@ -263,13 +263,13 @@ void rrdmetric_from_rrddim(RRDDIM *rd) { #define rrddim_get_rrdmetric(rd) rrddim_get_rrdmetric_with_trace(rd, __FUNCTION__) static inline RRDMETRIC *rrddim_get_rrdmetric_with_trace(RRDDIM *rd, const char *function) { if(unlikely(!rd->rrdmetric)) { - error("RRDMETRIC: RRDDIM '%s' is not linked to an RRDMETRIC at %s()", rrddim_id(rd), function); + netdata_log_error("RRDMETRIC: RRDDIM '%s' is not linked to an RRDMETRIC at %s()", rrddim_id(rd), function); return NULL; } RRDMETRIC *rm = rrdmetric_acquired_value(rd->rrdmetric); if(unlikely(!rm)) { - error("RRDMETRIC: RRDDIM '%s' lost the link to its RRDMETRIC at %s()", rrddim_id(rd), function); + netdata_log_error("RRDMETRIC: RRDDIM '%s' lost the link to its RRDMETRIC at %s()", rrddim_id(rd), function); return NULL; } diff --git a/database/contexts/query_target.c b/database/contexts/query_target.c index 4589eaae8d..defb5acdd1 100644 --- a/database/contexts/query_target.c +++ b/database/contexts/query_target.c @@ -895,12 +895,12 @@ static ssize_t query_node_add(void *data, RRDHOST *host, bool queryable_host) { // is the chart given valid? if(unlikely(qtl->st && (!qtl->st->rrdinstance || !qtl->st->rrdcontext))) { - error("QUERY TARGET: RRDSET '%s' given, but it is not linked to rrdcontext structures. Linking it now.", rrdset_name(qtl->st)); + netdata_log_error("QUERY TARGET: RRDSET '%s' given, but it is not linked to rrdcontext structures. Linking it now.", rrdset_name(qtl->st)); rrdinstance_from_rrdset(qtl->st); if(unlikely(qtl->st && (!qtl->st->rrdinstance || !qtl->st->rrdcontext))) { - error("QUERY TARGET: RRDSET '%s' given, but failed to be linked to rrdcontext structures. Switching to context query.", - rrdset_name(qtl->st)); + netdata_log_error("QUERY TARGET: RRDSET '%s' given, but failed to be linked to rrdcontext structures. Switching to context query.", + rrdset_name(qtl->st)); if (!is_valid_sp(qtl->instances)) qtl->instances = rrdset_name(qtl->st); @@ -1098,7 +1098,7 @@ QUERY_TARGET *query_target_create(QUERY_TARGET_REQUEST *qtr) { } else if (unlikely(host != qtl.st->rrdhost)) { // Oops! A different host! - error("QUERY TARGET: RRDSET '%s' given does not belong to host '%s'. Switching query host to '%s'", + netdata_log_error("QUERY TARGET: RRDSET '%s' given does not belong to host '%s'. Switching query host to '%s'", rrdset_name(qtl.st), rrdhost_hostname(host), rrdhost_hostname(qtl.st->rrdhost)); host = qtl.st->rrdhost; } diff --git a/database/contexts/rrdcontext.c b/database/contexts/rrdcontext.c index 172c292f0a..92a842c0ac 100644 --- a/database/contexts/rrdcontext.c +++ b/database/contexts/rrdcontext.c @@ -224,25 +224,26 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { struct ctxs_checkpoint *cmd = ptr; if(!rrdhost_check_our_claim_id(cmd->claim_id)) { - error("RRDCONTEXT: received checkpoint command for claim_id '%s', node id '%s', but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", - cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + netdata_log_error("RRDCONTEXT: received checkpoint command for claim_id '%s', node id '%s', but this is not our claim id. Ours '%s', received '%s'. 
Ignoring command.", + cmd->claim_id, cmd->node_id, + localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", + cmd->claim_id); return; } RRDHOST *host = rrdhost_find_by_node_id(cmd->node_id); if(!host) { - error("RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', but there is no node with such node id here. Ignoring command.", - cmd->claim_id, cmd->node_id); + netdata_log_error("RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', but there is no node with such node id here. Ignoring command.", + cmd->claim_id, + cmd->node_id); return; } if(rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS)) { netdata_log_info("RRDCONTEXT: received checkpoint command for claim id '%s', node id '%s', while node '%s' has an active context streaming.", - cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); + cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); // disable it temporarily, so that our worker will not attempt to send messages in parallel rrdhost_flag_clear(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS); @@ -251,8 +252,8 @@ void rrdcontext_hub_checkpoint_command(void *ptr) { uint64_t our_version_hash = rrdcontext_version_hash(host); if(cmd->version_hash != our_version_hash) { - error("RRDCONTEXT: received version hash %"PRIu64" for host '%s', does not match our version hash %"PRIu64". Sending snapshot of all contexts.", - cmd->version_hash, rrdhost_hostname(host), our_version_hash); + netdata_log_error("RRDCONTEXT: received version hash %"PRIu64" for host '%s', does not match our version hash %"PRIu64". Sending snapshot of all contexts.", + cmd->version_hash, rrdhost_hostname(host), our_version_hash); #ifdef ENABLE_ACLK // prepare the snapshot @@ -285,25 +286,25 @@ void rrdcontext_hub_stop_streaming_command(void *ptr) { struct stop_streaming_ctxs *cmd = ptr; if(!rrdhost_check_our_claim_id(cmd->claim_id)) { - error("RRDCONTEXT: received stop streaming command for claim_id '%s', node id '%s', but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", - cmd->claim_id, cmd->node_id, - localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", - cmd->claim_id); + netdata_log_error("RRDCONTEXT: received stop streaming command for claim_id '%s', node id '%s', but this is not our claim id. Ours '%s', received '%s'. Ignoring command.", + cmd->claim_id, cmd->node_id, + localhost->aclk_state.claimed_id?localhost->aclk_state.claimed_id:"NOT SET", + cmd->claim_id); return; } RRDHOST *host = rrdhost_find_by_node_id(cmd->node_id); if(!host) { - error("RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', but there is no node with such node id here. Ignoring command.", - cmd->claim_id, cmd->node_id); + netdata_log_error("RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', but there is no node with such node id here. Ignoring command.", + cmd->claim_id, cmd->node_id); return; } if(!rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_CONTEXTS)) { - error("RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', but node '%s' does not have active context streaming. Ignoring command.", - cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); + netdata_log_error("RRDCONTEXT: received stop streaming command for claim id '%s', node id '%s', but node '%s' does not have active context streaming. 
Ignoring command.", + cmd->claim_id, cmd->node_id, rrdhost_hostname(host)); return; } diff --git a/database/contexts/worker.c b/database/contexts/worker.c index 6c8d4b66f4..e6c3ff3df4 100644 --- a/database/contexts/worker.c +++ b/database/contexts/worker.c @@ -350,7 +350,8 @@ void rrdcontext_delete_from_sql_unsafe(RRDCONTEXT *rc) { // delete it from SQL if(ctx_delete_context(&rc->rrdhost->host_uuid, &rc->hub) != 0) - error("RRDCONTEXT: failed to delete context '%s' version %"PRIu64" from SQL.", rc->hub.id, rc->hub.version); + netdata_log_error("RRDCONTEXT: failed to delete context '%s' version %"PRIu64" from SQL.", + rc->hub.id, rc->hub.version); } static void rrdcontext_garbage_collect_single_host(RRDHOST *host, bool worker_jobs) { @@ -374,11 +375,11 @@ static void rrdcontext_garbage_collect_single_host(RRDHOST *host, bool worker_jo if(rrdmetric_should_be_deleted(rm)) { if(worker_jobs) worker_is_busy(WORKER_JOB_CLEANUP_DELETE); if(!dictionary_del(ri->rrdmetrics, string2str(rm->id))) - error("RRDCONTEXT: metric '%s' of instance '%s' of context '%s' of host '%s', failed to be deleted from rrdmetrics dictionary.", - string2str(rm->id), - string2str(ri->id), - string2str(rc->id), - rrdhost_hostname(host)); + netdata_log_error("RRDCONTEXT: metric '%s' of instance '%s' of context '%s' of host '%s', failed to be deleted from rrdmetrics dictionary.", + string2str(rm->id), + string2str(ri->id), + string2str(rc->id), + rrdhost_hostname(host)); else internal_error( true, @@ -394,10 +395,10 @@ static void rrdcontext_garbage_collect_single_host(RRDHOST *host, bool worker_jo if(rrdinstance_should_be_deleted(ri)) { if(worker_jobs) worker_is_busy(WORKER_JOB_CLEANUP_DELETE); if(!dictionary_del(rc->rrdinstances, string2str(ri->id))) - error("RRDCONTEXT: instance '%s' of context '%s' of host '%s', failed to be deleted from rrdmetrics dictionary.", - string2str(ri->id), - string2str(rc->id), - rrdhost_hostname(host)); + netdata_log_error("RRDCONTEXT: instance '%s' of context '%s' of host '%s', failed to be deleted from rrdmetrics dictionary.", + string2str(ri->id), + string2str(rc->id), + rrdhost_hostname(host)); else internal_error( true, @@ -415,7 +416,7 @@ static void rrdcontext_garbage_collect_single_host(RRDHOST *host, bool worker_jo rrdcontext_delete_from_sql_unsafe(rc); if(!dictionary_del(host->rrdctx.contexts, string2str(rc->id))) - error("RRDCONTEXT: context '%s' of host '%s', failed to be deleted from rrdmetrics dictionary.", + netdata_log_error("RRDCONTEXT: context '%s' of host '%s', failed to be deleted from rrdmetrics dictionary.", string2str(rc->id), rrdhost_hostname(host)); else @@ -844,7 +845,7 @@ void rrdcontext_message_send_unsafe(RRDCONTEXT *rc, bool snapshot __maybe_unused rrdcontext_delete_from_sql_unsafe(rc); else if (ctx_store_context(&rc->rrdhost->host_uuid, &rc->hub) != 0) - error("RRDCONTEXT: failed to save context '%s' version %"PRIu64" to SQL.", rc->hub.id, rc->hub.version); + netdata_log_error("RRDCONTEXT: failed to save context '%s' version %"PRIu64" to SQL.", rc->hub.id, rc->hub.version); } static bool check_if_cloud_version_changed_unsafe(RRDCONTEXT *rc, bool sending __maybe_unused) { @@ -1021,8 +1022,8 @@ static void rrdcontext_dispatch_queued_contexts_to_hub(RRDHOST *host, usec_t now // delete it from the master dictionary if(!dictionary_del(host->rrdctx.contexts, string2str(rc->id))) - error("RRDCONTEXT: '%s' of host '%s' failed to be deleted from rrdcontext dictionary.", - string2str(id), rrdhost_hostname(host)); + netdata_log_error("RRDCONTEXT: '%s' of host '%s' failed to 
be deleted from rrdcontext dictionary.", + string2str(id), rrdhost_hostname(host)); string_freez(id); } diff --git a/database/engine/cache.c b/database/engine/cache.c index 2c7a4232e4..7a9ccf8d1b 100644 --- a/database/engine/cache.c +++ b/database/engine/cache.c @@ -1847,7 +1847,7 @@ void pgc_destroy(PGC *cache) { free_all_unreferenced_clean_pages(cache); if(PGC_REFERENCED_PAGES(cache)) - error("DBENGINE CACHE: there are %zu referenced cache pages - leaving the cache allocated", PGC_REFERENCED_PAGES(cache)); + netdata_log_error("DBENGINE CACHE: there are %zu referenced cache pages - leaving the cache allocated", PGC_REFERENCED_PAGES(cache)); else { pointer_destroy_index(cache); diff --git a/database/engine/datafile.c b/database/engine/datafile.c index 9d75e4bc00..d5c1285be1 100644 --- a/database/engine/datafile.c +++ b/database/engine/datafile.c @@ -175,7 +175,7 @@ int close_data_file(struct rrdengine_datafile *datafile) ret = uv_fs_close(NULL, &req, datafile->file, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -194,7 +194,7 @@ int unlink_data_file(struct rrdengine_datafile *datafile) ret = uv_fs_unlink(NULL, &req, path, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -215,21 +215,21 @@ int destroy_data_file_unsafe(struct rrdengine_datafile *datafile) ret = uv_fs_ftruncate(NULL, &req, datafile->file, 0, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_ftruncate(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_ftruncate(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); ret = uv_fs_close(NULL, &req, datafile->file, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); ret = uv_fs_unlink(NULL, &req, path, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -272,7 +272,7 @@ int create_data_file(struct rrdengine_datafile *datafile) ret = uv_fs_write(NULL, &req, file, &iov, 1, 0, NULL); if (ret < 0) { fatal_assert(req.result < 0); - error("DBENGINE: uv_fs_write: %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_write: %s", uv_strerror(ret)); ctx_io_error(ctx); } uv_fs_req_cleanup(&req); @@ -303,7 +303,7 @@ static int check_data_file_superblock(uv_file file) ret = uv_fs_read(NULL, &req, file, &iov, 1, 0, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_read: %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_read: %s", uv_strerror(ret)); uv_fs_req_cleanup(&req); goto error; } @@ -313,7 +313,7 @@ static int check_data_file_superblock(uv_file file) if (strncmp(superblock->magic_number, RRDENG_DF_MAGIC, RRDENG_MAGIC_SZ) || strncmp(superblock->version, RRDENG_DF_VER, RRDENG_VER_SZ) || superblock->tier != 1) { - error("DBENGINE: file has invalid superblock."); + netdata_log_error("DBENGINE: file has invalid superblock."); ret = UV_EINVAL; } else { ret = 0; @@ -361,7 +361,7 @@ static int load_data_file(struct rrdengine_datafile *datafile) error = ret; ret = uv_fs_close(NULL, 
&req, file, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -394,7 +394,7 @@ static int scan_data_files(struct rrdengine_instance *ctx) if (ret < 0) { fatal_assert(req.result < 0); uv_fs_req_cleanup(&req); - error("DBENGINE: uv_fs_scandir(%s): %s", ctx->config.dbfiles_path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_scandir(%s): %s", ctx->config.dbfiles_path, uv_strerror(ret)); ctx_fs_error(ctx); return ret; } @@ -416,7 +416,7 @@ static int scan_data_files(struct rrdengine_instance *ctx) } if (matched_files == MAX_DATAFILES) - error("DBENGINE: warning: hit maximum database engine file limit of %d files", MAX_DATAFILES); + netdata_log_error("DBENGINE: warning: hit maximum database engine file limit of %d files", MAX_DATAFILES); qsort(datafiles, matched_files, sizeof(*datafiles), scan_data_files_cmp); @@ -441,7 +441,7 @@ static int scan_data_files(struct rrdengine_instance *ctx) if (must_delete_pair) { char path[RRDENG_PATH_MAX]; - error("DBENGINE: deleting invalid data and journal file pair."); + netdata_log_error("DBENGINE: deleting invalid data and journal file pair."); ret = journalfile_unlink(journalfile); if (!ret) { journalfile_v1_generate_path(datafile, path, sizeof(path)); @@ -521,14 +521,14 @@ int init_data_files(struct rrdengine_instance *ctx) fatal_assert(0 == uv_rwlock_init(&ctx->datafiles.rwlock)); ret = scan_data_files(ctx); if (ret < 0) { - error("DBENGINE: failed to scan path \"%s\".", ctx->config.dbfiles_path); + netdata_log_error("DBENGINE: failed to scan path \"%s\".", ctx->config.dbfiles_path); return ret; } else if (0 == ret) { netdata_log_info("DBENGINE: data files not found, creating in path \"%s\".", ctx->config.dbfiles_path); ctx->atomic.last_fileno = 0; ret = create_new_datafile_pair(ctx, false); if (ret) { - error("DBENGINE: failed to create data and journal files in path \"%s\".", ctx->config.dbfiles_path); + netdata_log_error("DBENGINE: failed to create data and journal files in path \"%s\".", ctx->config.dbfiles_path); return ret; } } diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c index bf1f981f31..c51c0f4be7 100644 --- a/database/engine/journalfile.c +++ b/database/engine/journalfile.c @@ -12,7 +12,7 @@ static void after_extent_write_journalfile_v1_io(uv_fs_t* req) debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__); if (req->result < 0) { ctx_io_error(ctx); - error("DBENGINE: %s: uv_fs_write: %s", __func__, uv_strerror((int)req->result)); + netdata_log_error("DBENGINE: %s: uv_fs_write: %s", __func__, uv_strerror((int)req->result)); } else { debug(D_RRDENGINE, "%s: Journal block was written to disk.", __func__); } @@ -271,7 +271,7 @@ static bool journalfile_v2_mounted_data_unmount(struct rrdengine_journalfile *jo if (munmap(journalfile->mmap.data, journalfile->mmap.size)) { char path[RRDENG_PATH_MAX]; journalfile_v2_generate_path(journalfile->datafile, path, sizeof(path)); - error("DBENGINE: failed to unmap index file '%s'", path); + netdata_log_error("DBENGINE: failed to unmap index file '%s'", path); internal_fatal(true, "DBENGINE: failed to unmap file '%s'", path); ctx_fs_error(journalfile->datafile->ctx); } @@ -483,7 +483,7 @@ static int close_uv_file(struct rrdengine_datafile *datafile, uv_file file) ret = uv_fs_close(NULL, &req, file, NULL); if (ret < 0) { journalfile_v1_generate_path(datafile, path, sizeof(path)); - 
error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); ctx_fs_error(datafile->ctx); } uv_fs_req_cleanup(&req); @@ -512,7 +512,7 @@ int journalfile_unlink(struct rrdengine_journalfile *journalfile) ret = uv_fs_unlink(NULL, &req, path, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -536,7 +536,7 @@ int journalfile_destroy_unsafe(struct rrdengine_journalfile *journalfile, struct if (journalfile->file) { ret = uv_fs_ftruncate(NULL, &req, journalfile->file, 0, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_ftruncate(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_ftruncate(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -546,14 +546,14 @@ int journalfile_destroy_unsafe(struct rrdengine_journalfile *journalfile, struct // This is the new journal v2 index file ret = uv_fs_unlink(NULL, &req, path_v2, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); ret = uv_fs_unlink(NULL, &req, path, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); @@ -598,7 +598,7 @@ int journalfile_create(struct rrdengine_journalfile *journalfile, struct rrdengi ret = uv_fs_write(NULL, &req, file, &iov, 1, 0, NULL); if (ret < 0) { fatal_assert(req.result < 0); - error("DBENGINE: uv_fs_write: %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_write: %s", uv_strerror(ret)); ctx_io_error(ctx); } uv_fs_req_cleanup(&req); @@ -630,7 +630,7 @@ static int journalfile_check_superblock(uv_file file) ret = uv_fs_read(NULL, &req, file, &iov, 1, 0, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_read: %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_read: %s", uv_strerror(ret)); uv_fs_req_cleanup(&req); goto error; } @@ -639,7 +639,7 @@ static int journalfile_check_superblock(uv_file file) if (strncmp(superblock->magic_number, RRDENG_JF_MAGIC, RRDENG_MAGIC_SZ) || strncmp(superblock->version, RRDENG_JF_VER, RRDENG_VER_SZ)) { - error("DBENGINE: File has invalid superblock."); + netdata_log_error("DBENGINE: File has invalid superblock."); ret = UV_EINVAL; } else { ret = 0; @@ -660,7 +660,7 @@ static void journalfile_restore_extent_metadata(struct rrdengine_instance *ctx, descr_size = sizeof(*jf_metric_data->descr) * count; payload_length = sizeof(*jf_metric_data) + descr_size; if (payload_length > max_size) { - error("DBENGINE: corrupted transaction payload."); + netdata_log_error("DBENGINE: corrupted transaction payload."); return; } @@ -671,7 +671,7 @@ static void journalfile_restore_extent_metadata(struct rrdengine_instance *ctx, if (page_type > PAGE_TYPE_MAX) { if (!bitmap256_get_bit(&page_error_map, page_type)) { - error("DBENGINE: unknown page type %d encountered.", page_type); + netdata_log_error("DBENGINE: unknown page type %d encountered.", page_type); bitmap256_set_bit(&page_error_map, page_type, 1); } continue; @@ -744,14 +744,14 @@ static unsigned journalfile_replay_transaction(struct rrdengine_instance *ctx, s return 0; } if (sizeof(*jf_header) > max_size) { - 
error("DBENGINE: corrupted transaction record, skipping."); + netdata_log_error("DBENGINE: corrupted transaction record, skipping."); return 0; } *id = jf_header->id; payload_length = jf_header->payload_length; size_bytes = sizeof(*jf_header) + payload_length + sizeof(*jf_trailer); if (size_bytes > max_size) { - error("DBENGINE: corrupted transaction record, skipping."); + netdata_log_error("DBENGINE: corrupted transaction record, skipping."); return 0; } jf_trailer = buf + sizeof(*jf_header) + payload_length; @@ -760,7 +760,7 @@ static unsigned journalfile_replay_transaction(struct rrdengine_instance *ctx, s ret = crc32cmp(jf_trailer->checksum, crc); debug(D_RRDENGINE, "Transaction %"PRIu64" was read from disk. CRC32 check: %s", *id, ret ? "FAILED" : "SUCCEEDED"); if (unlikely(ret)) { - error("DBENGINE: transaction %"PRIu64" was read from disk. CRC32 check: FAILED", *id); + netdata_log_error("DBENGINE: transaction %"PRIu64" was read from disk. CRC32 check: FAILED", *id); return size_bytes; } switch (jf_header->type) { @@ -769,7 +769,7 @@ static unsigned journalfile_replay_transaction(struct rrdengine_instance *ctx, s journalfile_restore_extent_metadata(ctx, journalfile, buf + sizeof(*jf_header), payload_length); break; default: - error("DBENGINE: unknown transaction type, skipping record."); + netdata_log_error("DBENGINE: unknown transaction type, skipping record."); break; } @@ -807,7 +807,7 @@ static uint64_t journalfile_iterate_transactions(struct rrdengine_instance *ctx, iov = uv_buf_init(buf, size_bytes); ret = uv_fs_read(NULL, &req, file, &iov, 1, pos, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_read: pos=%" PRIu64 ", %s", pos, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_read: pos=%" PRIu64 ", %s", pos, uv_strerror(ret)); uv_fs_req_cleanup(&req); goto skip_file; } @@ -846,7 +846,7 @@ static int journalfile_check_v2_extent_list (void *data_start, size_t file_size) crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, (uint8_t *) data_start + j2_header->extent_offset, j2_header->extent_count * sizeof(struct journal_extent_list)); if (unlikely(crc32cmp(journal_v2_trailer->checksum, crc))) { - error("DBENGINE: extent list CRC32 check: FAILED"); + netdata_log_error("DBENGINE: extent list CRC32 check: FAILED"); return 1; } @@ -866,7 +866,7 @@ static int journalfile_check_v2_metric_list(void *data_start, size_t file_size) crc = crc32(0L, Z_NULL, 0); crc = crc32(crc, (uint8_t *) data_start + j2_header->metric_offset, j2_header->metric_count * sizeof(struct journal_metric_list)); if (unlikely(crc32cmp(journal_v2_trailer->checksum, crc))) { - error("DBENGINE: metric list CRC32 check: FAILED"); + netdata_log_error("DBENGINE: metric list CRC32 check: FAILED"); return 1; } return 0; @@ -910,7 +910,7 @@ static int journalfile_v2_validate(void *data_start, size_t journal_v2_file_size rc = crc32cmp(journal_v2_trailer->checksum, crc); if (unlikely(rc)) { - error("DBENGINE: file CRC32 check: FAILED"); + netdata_log_error("DBENGINE: file CRC32 check: FAILED"); return 1; } @@ -1047,13 +1047,13 @@ int journalfile_v2_load(struct rrdengine_instance *ctx, struct rrdengine_journal if (errno == ENOENT) return 1; ctx_fs_error(ctx); - error("DBENGINE: failed to open '%s'", path_v2); + netdata_log_error("DBENGINE: failed to open '%s'", path_v2); return 1; } ret = fstat(fd, &statbuf); if (ret) { - error("DBENGINE: failed to get file information for '%s'", path_v2); + netdata_log_error("DBENGINE: failed to get file information for '%s'", path_v2); close(fd); return 1; } @@ -1085,7 +1085,7 @@ int 
journalfile_v2_load(struct rrdengine_instance *ctx, struct rrdengine_journal error_report("File %s is invalid and it will be rebuilt", path_v2); if (unlikely(munmap(data_start, journal_v2_file_size))) - error("DBENGINE: failed to unmap '%s'", path_v2); + netdata_log_error("DBENGINE: failed to unmap '%s'", path_v2); close(fd); return rc; @@ -1096,7 +1096,7 @@ int journalfile_v2_load(struct rrdengine_instance *ctx, struct rrdengine_journal if (unlikely(!entries)) { if (unlikely(munmap(data_start, journal_v2_file_size))) - error("DBENGINE: failed to unmap '%s'", path_v2); + netdata_log_error("DBENGINE: failed to unmap '%s'", path_v2); close(fd); return 1; @@ -1479,7 +1479,7 @@ void journalfile_migrate_to_v2_callback(Word_t section, unsigned datafile_fileno if (ret < 0) { ctx_current_disk_space_increase(ctx, total_file_size); ctx_fs_error(ctx); - error("DBENGINE: failed to resize file '%s'", path); + netdata_log_error("DBENGINE: failed to resize file '%s'", path); } else ctx_current_disk_space_increase(ctx, resize_file_to); @@ -1560,7 +1560,7 @@ int journalfile_load(struct rrdengine_instance *ctx, struct rrdengine_journalfil cleanup: ret = uv_fs_close(NULL, &req, file, NULL); if (ret < 0) { - error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret)); ctx_fs_error(ctx); } uv_fs_req_cleanup(&req); diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c index 555ff4f282..84d0c41efd 100644 --- a/database/engine/rrdengine.c +++ b/database/engine/rrdengine.c @@ -733,7 +733,7 @@ static void after_extent_write_datafile_io(uv_fs_t *uv_fs_request) { if (uv_fs_request->result < 0) { ctx_io_error(ctx); - error("DBENGINE: %s: uv_fs_write(): %s", __func__, uv_strerror((int)uv_fs_request->result)); + netdata_log_error("DBENGINE: %s: uv_fs_write(): %s", __func__, uv_strerror((int)uv_fs_request->result)); } journalfile_v1_extent_write(ctx, xt_io_descr->datafile, xt_io_descr->wal, &rrdeng_main.loop); @@ -1643,14 +1643,14 @@ bool rrdeng_dbengine_spawn(struct rrdengine_instance *ctx __maybe_unused) { ret = uv_loop_init(&rrdeng_main.loop); if (ret) { - error("DBENGINE: uv_loop_init(): %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_loop_init(): %s", uv_strerror(ret)); return false; } rrdeng_main.loop.data = &rrdeng_main; ret = uv_async_init(&rrdeng_main.loop, &rrdeng_main.async, async_cb); if (ret) { - error("DBENGINE: uv_async_init(): %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_async_init(): %s", uv_strerror(ret)); fatal_assert(0 == uv_loop_close(&rrdeng_main.loop)); return false; } @@ -1658,7 +1658,7 @@ bool rrdeng_dbengine_spawn(struct rrdengine_instance *ctx __maybe_unused) { ret = uv_timer_init(&rrdeng_main.loop, &rrdeng_main.timer); if (ret) { - error("DBENGINE: uv_timer_init(): %s", uv_strerror(ret)); + netdata_log_error("DBENGINE: uv_timer_init(): %s", uv_strerror(ret)); uv_close((uv_handle_t *)&rrdeng_main.async, NULL); fatal_assert(0 == uv_loop_close(&rrdeng_main.loop)); return false; diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c index 6f87a1d46b..49df5c8146 100755 --- a/database/engine/rrdengineapi.c +++ b/database/engine/rrdengineapi.c @@ -247,7 +247,7 @@ STORAGE_COLLECT_HANDLE *rrdeng_store_metric_init(STORAGE_METRIC_HANDLE *db_metri is_1st_metric_writer = false; char uuid[UUID_STR_LEN + 1]; uuid_unparse(*mrg_metric_uuid(main_mrg, metric), uuid); - error("DBENGINE: metric '%s' is already collected and should not be collected twice - expect gaps on the 
charts", uuid); + netdata_log_error("DBENGINE: metric '%s' is already collected and should not be collected twice - expect gaps on the charts", uuid); } metric = mrg_metric_dup(main_mrg, metric); @@ -312,7 +312,7 @@ static bool page_has_only_empty_metrics(struct rrdeng_collect_handle *handle) { default: { static bool logged = false; if(!logged) { - error("DBENGINE: cannot check page for nulls on unknown page type id %d", (mrg_metric_ctx(handle->metric))->config.page_type); + netdata_log_error("DBENGINE: cannot check page for nulls on unknown page type id %d", (mrg_metric_ctx(handle->metric))->config.page_type); logged = true; } return false; @@ -908,7 +908,7 @@ STORAGE_POINT rrdeng_load_metric_next(struct storage_engine_query_handle *rrddim default: { static bool logged = false; if(!logged) { - error("DBENGINE: unknown page type %d found. Cannot decode it. Ignoring its metrics.", handle->ctx->config.page_type); + netdata_log_error("DBENGINE: unknown page type %d found. Cannot decode it. Ignoring its metrics.", handle->ctx->config.page_type); logged = true; } storage_point_empty(sp, sp.start_time_s, sp.end_time_s); @@ -986,7 +986,7 @@ bool rrdeng_metric_retention_by_uuid(STORAGE_INSTANCE *db_instance, uuid_t *dim_ { struct rrdengine_instance *ctx = (struct rrdengine_instance *)db_instance; if (unlikely(!ctx)) { - error("DBENGINE: invalid STORAGE INSTANCE to %s()", __FUNCTION__); + netdata_log_error("DBENGINE: invalid STORAGE INSTANCE to %s()", __FUNCTION__); return false; } @@ -1160,7 +1160,7 @@ int rrdeng_init(struct rrdengine_instance **ctxp, const char *dbfiles_path, /* reserve RRDENG_FD_BUDGET_PER_INSTANCE file descriptors for this instance */ rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, RRDENG_FD_BUDGET_PER_INSTANCE); if (rrdeng_reserved_file_descriptors > max_open_files) { - error( + netdata_log_error( "Exceeded the budget of available file descriptors (%u/%u), cannot create new dbengine instance.", (unsigned)rrdeng_reserved_file_descriptors, (unsigned)max_open_files); diff --git a/database/engine/rrdenginelib.c b/database/engine/rrdenginelib.c index a1e99fd62c..dc581d98d9 100644 --- a/database/engine/rrdenginelib.c +++ b/database/engine/rrdenginelib.c @@ -14,12 +14,12 @@ int check_file_properties(uv_file file, uint64_t *file_size, size_t min_size) fatal_assert(req.result == 0); s = req.ptr; if (!(s->st_mode & S_IFREG)) { - error("Not a regular file.\n"); + netdata_log_error("Not a regular file.\n"); uv_fs_req_cleanup(&req); return UV_EINVAL; } if (s->st_size < min_size) { - error("File length is too short.\n"); + netdata_log_error("File length is too short.\n"); uv_fs_req_cleanup(&req); return UV_EINVAL; } @@ -56,9 +56,9 @@ int open_file_for_io(char *path, int flags, uv_file *file, int direct) fd = uv_fs_open(NULL, &req, path, current_flags, S_IRUSR | S_IWUSR, NULL); if (fd < 0) { if ((direct) && (UV_EINVAL == fd)) { - error("File \"%s\" does not support direct I/O, falling back to buffered I/O.", path); + netdata_log_error("File \"%s\" does not support direct I/O, falling back to buffered I/O.", path); } else { - error("Failed to open file \"%s\".", path); + netdata_log_error("Failed to open file \"%s\".", path); --direct; /* break the loop */ } } else { @@ -107,7 +107,7 @@ int count_legacy_children(char *dbfiles_path) ret = uv_fs_scandir(NULL, &req, dbfiles_path, 0, NULL); if (ret < 0) { uv_fs_req_cleanup(&req); - error("uv_fs_scandir(%s): %s", dbfiles_path, uv_strerror(ret)); + netdata_log_error("uv_fs_scandir(%s): %s", dbfiles_path, uv_strerror(ret)); return ret; } @@ 
-134,7 +134,7 @@ int compute_multidb_diskspace() fclose(fp); if (unlikely(rc != 1 || computed_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB)) { errno = 0; - error("File '%s' contains invalid input, it will be rebuild", multidb_disk_space_file); + netdata_log_error("File '%s' contains invalid input, it will be rebuild", multidb_disk_space_file); computed_multidb_disk_quota_mb = -1; } } @@ -151,7 +151,7 @@ int compute_multidb_diskspace() netdata_log_info("Created file '%s' to store the computed value", multidb_disk_space_file); fclose(fp); } else - error("Failed to store the default multidb disk quota size on '%s'", multidb_disk_space_file); + netdata_log_error("Failed to store the default multidb disk quota size on '%s'", multidb_disk_space_file); } else computed_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb; diff --git a/database/engine/rrdenginelib.h b/database/engine/rrdenginelib.h index ca8eacae4d..831e485316 100644 --- a/database/engine/rrdenginelib.h +++ b/database/engine/rrdenginelib.h @@ -53,7 +53,7 @@ static inline void modify_bit(unsigned *x, unsigned pos, uint8_t val) *x |= 1U << pos; break; default: - error("modify_bit() called with invalid argument."); + netdata_log_error("modify_bit() called with invalid argument."); break; } } diff --git a/database/ram/rrddim_mem.c b/database/ram/rrddim_mem.c index 64e219ef6e..a434f57d12 100644 --- a/database/ram/rrddim_mem.c +++ b/database/ram/rrddim_mem.c @@ -283,7 +283,7 @@ static inline size_t rrddim_time2slot(STORAGE_METRIC_HANDLE *db_metric_handle, t } if(unlikely(ret >= entries)) { - error("INTERNAL ERROR: rrddim_time2slot() on %s returns values outside entries", rrddim_name(rd)); + netdata_log_error("INTERNAL ERROR: rrddim_time2slot() on %s returns values outside entries", rrddim_name(rd)); ret = entries - 1; } @@ -304,7 +304,7 @@ static inline time_t rrddim_slot2time(STORAGE_METRIC_HANDLE *db_metric_handle, s size_t update_every = mh->update_every_s; if(slot >= entries) { - error("INTERNAL ERROR: caller of rrddim_slot2time() gives invalid slot %zu", slot); + netdata_log_error("INTERNAL ERROR: caller of rrddim_slot2time() gives invalid slot %zu", slot); slot = entries - 1; } @@ -314,14 +314,14 @@ static inline time_t rrddim_slot2time(STORAGE_METRIC_HANDLE *db_metric_handle, s ret = last_entry_s - (time_t)(update_every * (last_slot - slot)); if(unlikely(ret < first_entry_s)) { - error("INTERNAL ERROR: rrddim_slot2time() on dimension '%s' of chart '%s' returned time (%ld) too far in the past (before first_entry_s %ld) for slot %zu", + netdata_log_error("INTERNAL ERROR: rrddim_slot2time() on dimension '%s' of chart '%s' returned time (%ld) too far in the past (before first_entry_s %ld) for slot %zu", rrddim_name(rd), rrdset_id(rd->rrdset), ret, first_entry_s, slot); ret = first_entry_s; } if(unlikely(ret > last_entry_s)) { - error("INTERNAL ERROR: rrddim_slot2time() on dimension '%s' of chart '%s' returned time (%ld) too far into the future (after last_entry_s %ld) for slot %zu", + netdata_log_error("INTERNAL ERROR: rrddim_slot2time() on dimension '%s' of chart '%s' returned time (%ld) too far into the future (after last_entry_s %ld) for slot %zu", rrddim_name(rd), rrdset_id(rd->rrdset), ret, last_entry_s, slot); ret = last_entry_s; diff --git a/database/rrd.c b/database/rrd.c index d489ddb8b1..5b7752a5ea 100644 --- a/database/rrd.c +++ b/database/rrd.c @@ -148,7 +148,7 @@ char *rrdhost_cache_dir_for_rrdset_alloc(RRDHOST *host, const char *id) { if(host->rrd_memory_mode == RRD_MEMORY_MODE_MAP || host->rrd_memory_mode == 
RRD_MEMORY_MODE_SAVE) { int r = mkdir(ret, 0775); if(r != 0 && errno != EEXIST) - error("Cannot create directory '%s'", ret); + netdata_log_error("Cannot create directory '%s'", ret); } return ret; diff --git a/database/rrdcalc.c b/database/rrdcalc.c index 97db28d2eb..fe9da58018 100644 --- a/database/rrdcalc.c +++ b/database/rrdcalc.c @@ -56,7 +56,7 @@ inline const char *rrdcalc_status2string(RRDCALC_STATUS status) { return "CRITICAL"; default: - error("Unknown alarm status %d", status); + netdata_log_error("Unknown alarm status %d", status); return "UNKNOWN"; } } @@ -217,7 +217,7 @@ static void rrdcalc_link_to_rrdset(RRDSET *st, RRDCALC *rc) { netdata_rwlock_unlock(&st->alerts.rwlock); if(rc->update_every < rc->rrdset->update_every) { - error("Health alarm '%s.%s' has update every %d, less than chart update every %d. Setting alarm update frequency to %d.", rrdset_id(rc->rrdset), rrdcalc_name(rc), rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every); + netdata_log_error("Health alarm '%s.%s' has update every %d, less than chart update every %d. Setting alarm update frequency to %d.", rrdset_id(rc->rrdset), rrdcalc_name(rc), rc->update_every, rc->rrdset->update_every, rc->rrdset->update_every); rc->update_every = rc->rrdset->update_every; } @@ -318,7 +318,7 @@ static void rrdcalc_unlink_from_rrdset(RRDCALC *rc, bool having_ll_wrlock) { if(!st) { debug(D_HEALTH, "Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rrdcalc_chart_name(rc), rrdcalc_name(rc)); - error("Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rrdcalc_chart_name(rc), rrdcalc_name(rc)); + netdata_log_error("Requested to unlink RRDCALC '%s.%s' which is not linked to any RRDSET", rrdcalc_chart_name(rc), rrdcalc_name(rc)); return; } @@ -512,17 +512,17 @@ static void rrdcalc_rrdhost_insert_callback(const DICTIONARY_ITEM *item __maybe_ if(rt->calculation) { rc->calculation = expression_parse(rt->calculation->source, NULL, NULL); if(!rc->calculation) - error("Health alarm '%s.%s': failed to parse calculation expression '%s'", rrdset_id(st), rrdcalctemplate_name(rt), rt->calculation->source); + netdata_log_error("Health alarm '%s.%s': failed to parse calculation expression '%s'", rrdset_id(st), rrdcalctemplate_name(rt), rt->calculation->source); } if(rt->warning) { rc->warning = expression_parse(rt->warning->source, NULL, NULL); if(!rc->warning) - error("Health alarm '%s.%s': failed to re-parse warning expression '%s'", rrdset_id(st), rrdcalctemplate_name(rt), rt->warning->source); + netdata_log_error("Health alarm '%s.%s': failed to re-parse warning expression '%s'", rrdset_id(st), rrdcalctemplate_name(rt), rt->warning->source); } if(rt->critical) { rc->critical = expression_parse(rt->critical->source, NULL, NULL); if(!rc->critical) - error("Health alarm '%s.%s': failed to re-parse critical expression '%s'", rrdset_id(st), rrdcalctemplate_name(rt), rt->critical->source); + netdata_log_error("Health alarm '%s.%s': failed to re-parse critical expression '%s'", rrdset_id(st), rrdcalctemplate_name(rt), rt->critical->source); } } else if(ctr->from_config) { @@ -703,23 +703,23 @@ void rrdcalc_add_from_rrdcalctemplate(RRDHOST *host, RRDCALCTEMPLATE *rt, RRDSET dictionary_set_advanced(host->rrdcalc_root_index, key, (ssize_t)(key_len + 1), NULL, sizeof(RRDCALC), &tmp); if(tmp.react_action != RRDCALC_REACT_NEW && tmp.existing_from_template == false) - error("RRDCALC: from template '%s' on chart '%s' with key '%s', failed to be added to host '%s'. 
It is manually configured.", + netdata_log_error("RRDCALC: from template '%s' on chart '%s' with key '%s', failed to be added to host '%s'. It is manually configured.", string2str(rt->name), rrdset_id(st), key, rrdhost_hostname(host)); } int rrdcalc_add_from_config(RRDHOST *host, RRDCALC *rc) { if(!rc->chart) { - error("Health configuration for alarm '%s' does not have a chart", rrdcalc_name(rc)); + netdata_log_error("Health configuration for alarm '%s' does not have a chart", rrdcalc_name(rc)); return 0; } if(!rc->update_every) { - error("Health configuration for alarm '%s.%s' has no frequency (parameter 'every'). Ignoring it.", rrdcalc_chart_name(rc), rrdcalc_name(rc)); + netdata_log_error("Health configuration for alarm '%s.%s' has no frequency (parameter 'every'). Ignoring it.", rrdcalc_chart_name(rc), rrdcalc_name(rc)); return 0; } if(!RRDCALC_HAS_DB_LOOKUP(rc) && !rc->calculation && !rc->warning && !rc->critical) { - error("Health configuration for alarm '%s.%s' is useless (no db lookup, no calculation, no warning and no critical expressions)", rrdcalc_chart_name(rc), rrdcalc_name(rc)); + netdata_log_error("Health configuration for alarm '%s.%s' is useless (no db lookup, no calculation, no warning and no critical expressions)", rrdcalc_chart_name(rc), rrdcalc_name(rc)); return 0; } @@ -750,7 +750,7 @@ int rrdcalc_add_from_config(RRDHOST *host, RRDCALC *rc) { rrdset_foreach_done(st); } else { - error( + netdata_log_error( "RRDCALC: from config '%s' on chart '%s' failed to be added to host '%s'. It already exists.", string2str(rc->name), string2str(rc->chart), @@ -811,7 +811,7 @@ void rrdcalc_unlink_all_rrdset_alerts(RRDSET *st) { netdata_rwlock_wrlock(&st->alerts.rwlock); while((rc = st->alerts.base)) { if(last == rc) { - error("RRDCALC: malformed list of alerts linked to chart - cannot cleanup - giving up."); + netdata_log_error("RRDCALC: malformed list of alerts linked to chart - cannot cleanup - giving up."); break; } last = rc; diff --git a/database/rrdcalctemplate.c b/database/rrdcalctemplate.c index 81f47edd83..2444ef3ccc 100644 --- a/database/rrdcalctemplate.c +++ b/database/rrdcalctemplate.c @@ -223,17 +223,17 @@ static size_t rrdcalctemplate_key(char *dst, size_t dst_len, const char *name, c void rrdcalctemplate_add_from_config(RRDHOST *host, RRDCALCTEMPLATE *rt) { if(unlikely(!rt->context)) { - error("Health configuration for template '%s' does not have a context", rrdcalctemplate_name(rt)); + netdata_log_error("Health configuration for template '%s' does not have a context", rrdcalctemplate_name(rt)); return; } if(unlikely(!rt->update_every)) { - error("Health configuration for template '%s' has no frequency (parameter 'every'). Ignoring it.", rrdcalctemplate_name(rt)); + netdata_log_error("Health configuration for template '%s' has no frequency (parameter 'every'). 
Ignoring it.", rrdcalctemplate_name(rt)); return; } if(unlikely(!RRDCALCTEMPLATE_HAS_DB_LOOKUP(rt) && !rt->calculation && !rt->warning && !rt->critical)) { - error("Health configuration for template '%s' is useless (no calculation, no warning and no critical evaluation)", rrdcalctemplate_name(rt)); + netdata_log_error("Health configuration for template '%s' is useless (no calculation, no warning and no critical evaluation)", rrdcalctemplate_name(rt)); return; } diff --git a/database/rrddim.c b/database/rrddim.c index 96560b1c1a..4dc51b86cb 100644 --- a/database/rrddim.c +++ b/database/rrddim.c @@ -102,10 +102,10 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v } if(!initialized) - error("Failed to initialize all db tiers for chart '%s', dimension '%s", rrdset_name(st), rrddim_name(rd)); + netdata_log_error("Failed to initialize all db tiers for chart '%s', dimension '%s", rrdset_name(st), rrddim_name(rd)); if(!rd->tiers[0].db_metric_handle) - error("Failed to initialize the first db tier for chart '%s', dimension '%s", rrdset_name(st), rrddim_name(rd)); + netdata_log_error("Failed to initialize the first db tier for chart '%s', dimension '%s", rrdset_name(st), rrddim_name(rd)); } // initialize data collection for all tiers @@ -120,7 +120,7 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v } if(!initialized) - error("Failed to initialize data collection for all db tiers for chart '%s', dimension '%s", rrdset_name(st), rrddim_name(rd)); + netdata_log_error("Failed to initialize data collection for all db tiers for chart '%s', dimension '%s", rrdset_name(st), rrddim_name(rd)); } if(rrdset_number_of_dimensions(st) != 0) { @@ -499,7 +499,7 @@ int rrddim_hide(RRDSET *st, const char *id) { RRDDIM *rd = rrddim_find(st, id); if(unlikely(!rd)) { - error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, rrdset_name(st), rrdset_id(st), rrdhost_hostname(host)); + netdata_log_error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, rrdset_name(st), rrdset_id(st), rrdhost_hostname(host)); return 1; } if (!rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN)) { @@ -518,7 +518,7 @@ int rrddim_unhide(RRDSET *st, const char *id) { RRDHOST *host = st->rrdhost; RRDDIM *rd = rrddim_find(st, id); if(unlikely(!rd)) { - error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, rrdset_name(st), rrdset_id(st), rrdhost_hostname(host)); + netdata_log_error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, rrdset_name(st), rrdset_id(st), rrdhost_hostname(host)); return 1; } if (rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN)) { @@ -582,7 +582,7 @@ collected_number rrddim_set(RRDSET *st, const char *id, collected_number value) RRDHOST *host = st->rrdhost; RRDDIM *rd = rrddim_find(st, id); if(unlikely(!rd)) { - error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, rrdset_name(st), rrdset_id(st), rrdhost_hostname(host)); + netdata_log_error("Cannot find dimension with id '%s' on stats '%s' (%s) on host '%s'.", id, rrdset_name(st), rrdset_id(st), rrdhost_hostname(host)); return 0; } @@ -711,12 +711,12 @@ bool rrddim_memory_load_or_create_map_save(RRDSET *st, RRDDIM *rd, RRD_MEMORY_MO reset = 1; } else if(rd_on_file->memsize != size) { - error("File %s does not have the desired size, expected %lu but found %lu. 
Clearing it.", fullfilename, size, (unsigned long int) rd_on_file->memsize); + netdata_log_error("File %s does not have the desired size, expected %lu but found %lu. Clearing it.", fullfilename, size, (unsigned long int) rd_on_file->memsize); memset(rd_on_file, 0, size); reset = 1; } else if(rd_on_file->update_every != st->update_every) { - error("File %s does not have the same update frequency, expected %d but found %d. Clearing it.", fullfilename, st->update_every, rd_on_file->update_every); + netdata_log_error("File %s does not have the same update frequency, expected %d but found %d. Clearing it.", fullfilename, st->update_every, rd_on_file->update_every); memset(rd_on_file, 0, size); reset = 1; } diff --git a/database/rrdhost.c b/database/rrdhost.c index fdc591f15b..afe64ddf64 100644 --- a/database/rrdhost.c +++ b/database/rrdhost.c @@ -116,7 +116,8 @@ static inline RRDHOST *rrdhost_index_add_by_guid(RRDHOST *host) { rrdhost_option_set(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID); else { rrdhost_option_clear(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID); - error("RRDHOST: %s() host with machine guid '%s' is already indexed", __FUNCTION__, host->machine_guid); + netdata_log_error("RRDHOST: %s() host with machine guid '%s' is already indexed", + __FUNCTION__, host->machine_guid); } return host; @@ -125,7 +126,8 @@ static inline RRDHOST *rrdhost_index_add_by_guid(RRDHOST *host) { static void rrdhost_index_del_by_guid(RRDHOST *host) { if(rrdhost_option_check(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID)) { if(!dictionary_del(rrdhost_root_index, host->machine_guid)) - error("RRDHOST: %s() failed to delete machine guid '%s' from index", __FUNCTION__, host->machine_guid); + netdata_log_error("RRDHOST: %s() failed to delete machine guid '%s' from index", + __FUNCTION__, host->machine_guid); rrdhost_option_clear(host, RRDHOST_OPTION_INDEXED_MACHINE_GUID); } @@ -146,7 +148,8 @@ static inline void rrdhost_index_del_hostname(RRDHOST *host) { if(rrdhost_option_check(host, RRDHOST_OPTION_INDEXED_HOSTNAME)) { if(!dictionary_del(rrdhost_root_index_hostname, rrdhost_hostname(host))) - error("RRDHOST: %s() failed to delete hostname '%s' from index", __FUNCTION__, rrdhost_hostname(host)); + netdata_log_error("RRDHOST: %s() failed to delete hostname '%s' from index", + __FUNCTION__, rrdhost_hostname(host)); rrdhost_option_clear(host, RRDHOST_OPTION_INDEXED_HOSTNAME); } @@ -303,7 +306,8 @@ static RRDHOST *rrdhost_create( debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid); if(memory_mode == RRD_MEMORY_MODE_DBENGINE && !dbengine_enabled) { - error("memory mode 'dbengine' is not enabled, but host '%s' is configured for it. Falling back to 'alloc'", hostname); + netdata_log_error("memory mode 'dbengine' is not enabled, but host '%s' is configured for it. 
Falling back to 'alloc'", + hostname); memory_mode = RRD_MEMORY_MODE_ALLOC; } @@ -387,7 +391,7 @@ int is_legacy = 1; (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && is_legacy))) { int r = mkdir(host->cache_dir, 0775); if(r != 0 && errno != EEXIST) - error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), host->cache_dir); + netdata_log_error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), host->cache_dir); } } @@ -413,7 +417,7 @@ int is_legacy = 1; ret = mkdir(dbenginepath, 0775); if (ret != 0 && errno != EEXIST) - error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), dbenginepath); + netdata_log_error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), dbenginepath); else ret = 0; // succeed @@ -454,9 +458,8 @@ int is_legacy = 1; } if (ret) { // check legacy or multihost initialization success - error( - "Host '%s': cannot initialize host with machine guid '%s'. Failed to initialize DB engine at '%s'.", - rrdhost_hostname(host), host->machine_guid, host->cache_dir); + netdata_log_error("Host '%s': cannot initialize host with machine guid '%s'. Failed to initialize DB engine at '%s'.", + rrdhost_hostname(host), host->machine_guid, host->cache_dir); rrd_wrlock(); rrdhost_free___while_having_rrd_wrlock(host, true); @@ -504,7 +507,8 @@ int is_legacy = 1; RRDHOST *t = rrdhost_index_add_by_guid(host); if(t != host) { - error("Host '%s': cannot add host with machine guid '%s' to index. It already exists as host '%s' with machine guid '%s'.", rrdhost_hostname(host), host->machine_guid, rrdhost_hostname(t), t->machine_guid); + netdata_log_error("Host '%s': cannot add host with machine guid '%s' to index. It already exists as host '%s' with machine guid '%s'.", + rrdhost_hostname(host), host->machine_guid, rrdhost_hostname(t), t->machine_guid); rrdhost_free___while_having_rrd_wrlock(host, true); rrd_unlock(); return NULL; @@ -633,19 +637,23 @@ static void rrdhost_update(RRDHOST *host } if(host->rrd_update_every != update_every) - error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. " - "Restart netdata here to apply the new settings.", - rrdhost_hostname(host), host->rrd_update_every, update_every); + netdata_log_error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds. " + "Restart netdata here to apply the new settings.", + rrdhost_hostname(host), host->rrd_update_every, update_every); if(host->rrd_memory_mode != mode) - error("Host '%s' has memory mode '%s', but the wanted one is '%s'. " - "Restart netdata here to apply the new settings.", - rrdhost_hostname(host), rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode)); + netdata_log_error("Host '%s' has memory mode '%s', but the wanted one is '%s'. " + "Restart netdata here to apply the new settings.", + rrdhost_hostname(host), + rrd_memory_mode_name(host->rrd_memory_mode), + rrd_memory_mode_name(mode)); else if(host->rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE && host->rrd_history_entries < history) - error("Host '%s' has history of %d entries, but the wanted one is %ld entries. " - "Restart netdata here to apply the new settings.", - rrdhost_hostname(host), host->rrd_history_entries, history); + netdata_log_error("Host '%s' has history of %d entries, but the wanted one is %ld entries. 
" + "Restart netdata here to apply the new settings.", + rrdhost_hostname(host), + host->rrd_history_entries, + history); // update host tags rrdhost_init_tags(host, tags); @@ -725,8 +733,10 @@ RRDHOST *rrdhost_find_or_create( return host; /* If a legacy memory mode instantiates all dbengine state must be discarded to avoid inconsistencies */ - error("Archived host '%s' has memory mode '%s', but the wanted one is '%s'. Discarding archived state.", - rrdhost_hostname(host), rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode)); + netdata_log_error("Archived host '%s' has memory mode '%s', but the wanted one is '%s'. Discarding archived state.", + rrdhost_hostname(host), + rrd_memory_mode_name(host->rrd_memory_mode), + rrd_memory_mode_name(mode)); rrd_wrlock(); rrdhost_free___while_having_rrd_wrlock(host, true); @@ -834,18 +844,18 @@ void dbengine_init(char *hostname) { if (read_num > 0 && read_num <= MAX_PAGES_PER_EXTENT) rrdeng_pages_per_extent = read_num; else { - error("Invalid dbengine pages per extent %u given. Using %u.", read_num, rrdeng_pages_per_extent); + netdata_log_error("Invalid dbengine pages per extent %u given. Using %u.", read_num, rrdeng_pages_per_extent); config_set_number(CONFIG_SECTION_DB, "dbengine pages per extent", rrdeng_pages_per_extent); } storage_tiers = config_get_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); if(storage_tiers < 1) { - error("At least 1 storage tier is required. Assuming 1."); + netdata_log_error("At least 1 storage tier is required. Assuming 1."); storage_tiers = 1; config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); } if(storage_tiers > RRD_STORAGE_TIERS) { - error("Up to %d storage tier are supported. Assuming %d.", RRD_STORAGE_TIERS, RRD_STORAGE_TIERS); + netdata_log_error("Up to %d storage tier are supported. Assuming %d.", RRD_STORAGE_TIERS, RRD_STORAGE_TIERS); storage_tiers = RRD_STORAGE_TIERS; config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); } @@ -867,7 +877,7 @@ void dbengine_init(char *hostname) { int ret = mkdir(dbenginepath, 0775); if (ret != 0 && errno != EEXIST) { - error("DBENGINE on '%s': cannot create directory '%s'", hostname, dbenginepath); + netdata_log_error("DBENGINE on '%s': cannot create directory '%s'", hostname, dbenginepath); break; } @@ -887,7 +897,9 @@ void dbengine_init(char *hostname) { if(grouping_iterations < 2) { grouping_iterations = 2; config_set_number(CONFIG_SECTION_DB, dbengineconfig, grouping_iterations); - error("DBENGINE on '%s': 'dbegnine tier %zu update every iterations' cannot be less than 2. Assuming 2.", hostname, tier); + netdata_log_error("DBENGINE on '%s': 'dbegnine tier %zu update every iterations' cannot be less than 2. Assuming 2.", + hostname, + tier); } snprintfz(dbengineconfig, 200, "dbengine tier %zu backfill", tier); @@ -896,7 +908,7 @@ void dbengine_init(char *hostname) { else if(strcmp(bf, "full") == 0) backfill = RRD_BACKFILL_FULL; else if(strcmp(bf, "none") == 0) backfill = RRD_BACKFILL_NONE; else { - error("DBENGINE: unknown backfill value '%s', assuming 'new'", bf); + netdata_log_error("DBENGINE: unknown backfill value '%s', assuming 'new'", bf); config_set(CONFIG_SECTION_DB, dbengineconfig, "new"); backfill = RRD_BACKFILL_NEW; } @@ -907,7 +919,10 @@ void dbengine_init(char *hostname) { if(tier > 0 && get_tier_grouping(tier) > 65535) { storage_tiers_grouping_iterations[tier] = 1; - error("DBENGINE on '%s': dbengine tier %zu gives aggregation of more than 65535 points of tier 0. 
Disabling tiers above %zu", hostname, tier, tier); + netdata_log_error("DBENGINE on '%s': dbengine tier %zu gives aggregation of more than 65535 points of tier 0. Disabling tiers above %zu", + hostname, + tier, + tier); break; } @@ -935,16 +950,21 @@ void dbengine_init(char *hostname) { netdata_thread_join(tiers_init[tier].thread, &ptr); if(tiers_init[tier].ret != 0) { - error("DBENGINE on '%s': Failed to initialize multi-host database tier %zu on path '%s'", - hostname, tiers_init[tier].tier, tiers_init[tier].path); + netdata_log_error("DBENGINE on '%s': Failed to initialize multi-host database tier %zu on path '%s'", + hostname, + tiers_init[tier].tier, + tiers_init[tier].path); } else if(created_tiers == tier) created_tiers++; } if(created_tiers && created_tiers < storage_tiers) { - error("DBENGINE on '%s': Managed to create %zu tiers instead of %zu. Continuing with %zu available.", - hostname, created_tiers, storage_tiers, created_tiers); + netdata_log_error("DBENGINE on '%s': Managed to create %zu tiers instead of %zu. Continuing with %zu available.", + hostname, + created_tiers, + storage_tiers, + created_tiers); storage_tiers = created_tiers; } else if(!created_tiers) @@ -957,7 +977,7 @@ void dbengine_init(char *hostname) { #else storage_tiers = config_get_number(CONFIG_SECTION_DB, "storage tiers", 1); if(storage_tiers != 1) { - error("DBENGINE is not available on '%s', so only 1 database tier can be supported.", hostname); + netdata_log_error("DBENGINE is not available on '%s', so only 1 database tier can be supported.", hostname); storage_tiers = 1; config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); } @@ -998,13 +1018,13 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unitt if (!dbengine_enabled) { if (storage_tiers > 1) { - error("dbengine is not enabled, but %zu tiers have been requested. Resetting tiers to 1", - storage_tiers); + netdata_log_error("dbengine is not enabled, but %zu tiers have been requested. Resetting tiers to 1", + storage_tiers); storage_tiers = 1; } if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { - error("dbengine is not enabled, but it has been given as the default db mode. Resetting db mode to alloc"); + netdata_log_error("dbengine is not enabled, but it has been given as the default db mode. Resetting db mode to alloc"); default_rrd_memory_mode = RRD_MEMORY_MODE_ALLOC; } } @@ -1412,7 +1432,7 @@ static void rrdhost_load_config_labels(void) { int status = config_load(NULL, 1, CONFIG_SECTION_HOST_LABEL); if(!status) { char *filename = CONFIG_DIR "/" CONFIG_FILENAME; - error("RRDLABEL: Cannot reload the configuration file '%s', using labels in memory", filename); + netdata_log_error("RRDLABEL: Cannot reload the configuration file '%s', using labels in memory", filename); } struct section *co = appconfig_get_section(&netdata_config, CONFIG_SECTION_HOST_LABEL); @@ -1432,7 +1452,7 @@ static void rrdhost_load_kubernetes_labels(void) { sprintf(label_script, "%s/%s", netdata_configured_primary_plugins_dir, "get-kubernetes-labels.sh"); if (unlikely(access(label_script, R_OK) != 0)) { - error("Kubernetes pod label fetching script %s not found.",label_script); + netdata_log_error("Kubernetes pod label fetching script %s not found.",label_script); return; } @@ -1450,7 +1470,8 @@ static void rrdhost_load_kubernetes_labels(void) { // Non-zero exit code means that all the script output is error messages. 
We've shown already any message that didn't include a ':' // Here we'll inform with an ERROR that the script failed, show whatever (if anything) was added to the list of labels, free the memory and set the return to null int rc = netdata_pclose(fp_child_input, fp_child_output, pid); - if(rc) error("%s exited abnormally. Failed to get kubernetes labels.", label_script); + if(rc) + netdata_log_error("%s exited abnormally. Failed to get kubernetes labels.", label_script); } void reload_host_labels(void) { diff --git a/database/rrdlabels.c b/database/rrdlabels.c index 051222109f..77d9a91f00 100644 --- a/database/rrdlabels.c +++ b/database/rrdlabels.c @@ -571,7 +571,7 @@ static void labels_add_already_sanitized(DICTIONARY *dict, const char *key, cons void rrdlabels_add(DICTIONARY *dict, const char *name, const char *value, RRDLABEL_SRC ls) { if(!dict) { - error("%s(): called with NULL dictionary.", __FUNCTION__ ); + netdata_log_error("%s(): called with NULL dictionary.", __FUNCTION__ ); return; } @@ -580,7 +580,7 @@ void rrdlabels_add(DICTIONARY *dict, const char *name, const char *value, RRDLAB rrdlabels_sanitize_value(v, value, RRDLABELS_MAX_VALUE_LENGTH); if(!*n) { - error("%s: cannot add name '%s' (value '%s') which is sanitized as empty string", __FUNCTION__, name, value); + netdata_log_error("%s: cannot add name '%s' (value '%s') which is sanitized as empty string", __FUNCTION__, name, value); return; } @@ -621,7 +621,7 @@ static const char *get_quoted_string_up_to(char *dst, size_t dst_size, const cha void rrdlabels_add_pair(DICTIONARY *dict, const char *string, RRDLABEL_SRC ls) { if(!dict) { - error("%s(): called with NULL dictionary.", __FUNCTION__ ); + netdata_log_error("%s(): called with NULL dictionary.", __FUNCTION__ ); return; } diff --git a/database/rrdset.c b/database/rrdset.c index 3e1a1249a5..c16c1ee845 100644 --- a/database/rrdset.c +++ b/database/rrdset.c @@ -842,10 +842,10 @@ void rrdset_delete_files(RRDSET *st) { if(cache_filename) { netdata_log_info("Deleting chart header file '%s'.", cache_filename); if (unlikely(unlink(cache_filename) == -1)) - error("Cannot delete chart header file '%s'", cache_filename); + netdata_log_error("Cannot delete chart header file '%s'", cache_filename); } else - error("Cannot find the cache filename of chart '%s'", rrdset_id(st)); + netdata_log_error("Cannot find the cache filename of chart '%s'", rrdset_id(st)); } rrddim_foreach_read(rd, st) { @@ -854,7 +854,7 @@ void rrdset_delete_files(RRDSET *st) { netdata_log_info("Deleting dimension file '%s'.", cache_filename); if(unlikely(unlink(cache_filename) == -1)) - error("Cannot delete dimension file '%s'", cache_filename); + netdata_log_error("Cannot delete dimension file '%s'", cache_filename); } rrddim_foreach_done(rd); @@ -873,7 +873,7 @@ void rrdset_delete_obsolete_dimensions(RRDSET *st) { if(!cache_filename) continue; netdata_log_info("Deleting dimension file '%s'.", cache_filename); if(unlikely(unlink(cache_filename) == -1)) - error("Cannot delete dimension file '%s'", cache_filename); + netdata_log_error("Cannot delete dimension file '%s'", cache_filename); } } rrddim_foreach_done(rd); @@ -1538,7 +1538,7 @@ void rrdset_timed_done(RRDSET *st, struct timeval now, bool pending_rrdset_next) } if (unlikely(rrdset_flags & RRDSET_FLAG_OBSOLETE)) { - error("Chart '%s' has the OBSOLETE flag set, but it is collected.", rrdset_id(st)); + netdata_log_error("Chart '%s' has the OBSOLETE flag set, but it is collected.", rrdset_id(st)); rrdset_isnot_obsolete(st); } @@ -1685,7 +1685,7 @@ void 
rrdset_timed_done(RRDSET *st, struct timeval now, bool pending_rrdset_next) collected_total += rd->collector.collected_value; if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE))) { - error("Dimension %s in chart '%s' has the OBSOLETE flag set, but it is collected.", rrddim_name(rd), rrdset_id(st)); + netdata_log_error("Dimension %s in chart '%s' has the OBSOLETE flag set, but it is collected.", rrddim_name(rd), rrdset_id(st)); rrddim_isnot_obsolete(st, rd); } } @@ -2166,15 +2166,15 @@ bool rrdset_memory_load_or_create_map_save(RRDSET *st, RRD_MEMORY_MODE memory_mo memset(st_on_file, 0, size); } else if(strncmp(st_on_file->id, rrdset_id(st), RRD_ID_LENGTH_MAX_V019) != 0) { - error("File '%s' contents are not for chart '%s'. Clearing it.", fullfilename, rrdset_id(st)); + netdata_log_error("File '%s' contents are not for chart '%s'. Clearing it.", fullfilename, rrdset_id(st)); memset(st_on_file, 0, size); } else if(st_on_file->memsize != size || st_on_file->entries != st->db.entries) { - error("File '%s' does not have the desired size. Clearing it.", fullfilename); + netdata_log_error("File '%s' does not have the desired size. Clearing it.", fullfilename); memset(st_on_file, 0, size); } else if(st_on_file->update_every != st->update_every) { - error("File '%s' does not have the desired granularity. Clearing it.", fullfilename); + netdata_log_error("File '%s' does not have the desired granularity. Clearing it.", fullfilename); memset(st_on_file, 0, size); } else if((now_s - st_on_file->last_updated.tv_sec) > (long)st->update_every * (long)st->db.entries) { @@ -2182,7 +2182,7 @@ bool rrdset_memory_load_or_create_map_save(RRDSET *st, RRD_MEMORY_MODE memory_mo memset(st_on_file, 0, size); } else if(st_on_file->last_updated.tv_sec > now_s + st->update_every) { - error("File '%s' refers to the future by %zd secs. Resetting it to now.", fullfilename, (ssize_t)(st_on_file->last_updated.tv_sec - now_s)); + netdata_log_error("File '%s' refers to the future by %zd secs. Resetting it to now.", fullfilename, (ssize_t)(st_on_file->last_updated.tv_sec - now_s)); st_on_file->last_updated.tv_sec = now_s; } diff --git a/database/rrdsetvar.c b/database/rrdsetvar.c index 15377ddb2e..742266daf7 100644 --- a/database/rrdsetvar.c +++ b/database/rrdsetvar.c @@ -262,8 +262,13 @@ void rrdsetvar_custom_chart_variable_set(RRDSET *st, const RRDSETVAR_ACQUIRED *r RRDSETVAR *rs = dictionary_acquired_item_value((const DICTIONARY_ITEM *)rsa); if(rs->type != RRDVAR_TYPE_CALCULATED || !(rs->flags & RRDVAR_FLAG_CUSTOM_CHART_VAR) || !(rs->flags & RRDVAR_FLAG_ALLOCATED)) { - error("RRDSETVAR: requested to set variable '%s' of chart '%s' on host '%s' to value " NETDATA_DOUBLE_FORMAT - " but the variable is not a custom chart one (it has options 0x%x, value pointer %p). Ignoring request.", string2str(rs->name), rrdset_id(st), rrdhost_hostname(st->rrdhost), value, (uint32_t)rs->flags, rs->value); + netdata_log_error("RRDSETVAR: requested to set variable '%s' of chart '%s' on host '%s' to value " NETDATA_DOUBLE_FORMAT + " but the variable is not a custom chart one (it has options 0x%x, value pointer %p). 
Ignoring request.", + string2str(rs->name), + rrdset_id(st), + rrdhost_hostname(st->rrdhost), + value, + (uint32_t)rs->flags, rs->value); } else { NETDATA_DOUBLE *v = rs->value; diff --git a/database/rrdvar.c b/database/rrdvar.c index b03efc9589..09c4d404dd 100644 --- a/database/rrdvar.c +++ b/database/rrdvar.c @@ -175,7 +175,7 @@ void rrdvar_custom_host_variable_set(RRDHOST *host, const RRDVAR_ACQUIRED *rva, if(unlikely(!host->rrdvars || !rva)) return; // when health is not enabled if(rrdvar_type(rva) != RRDVAR_TYPE_CALCULATED || !(rrdvar_flags(rva) & (RRDVAR_FLAG_CUSTOM_HOST_VAR | RRDVAR_FLAG_ALLOCATED))) - error("requested to set variable '%s' to value " NETDATA_DOUBLE_FORMAT " but the variable is not a custom one.", rrdvar_name(rva), value); + netdata_log_error("requested to set variable '%s' to value " NETDATA_DOUBLE_FORMAT " but the variable is not a custom one.", rrdvar_name(rva), value); else { RRDVAR *rv = dictionary_acquired_item_value((const DICTIONARY_ITEM *)rva); NETDATA_DOUBLE *v = rv->value; @@ -228,7 +228,7 @@ NETDATA_DOUBLE rrdvar2number(const RRDVAR_ACQUIRED *rva) { } default: - error("I don't know how to convert RRDVAR type %u to NETDATA_DOUBLE", rv->type); + netdata_log_error("I don't know how to convert RRDVAR type %u to NETDATA_DOUBLE", rv->type); return NAN; } } diff --git a/database/sqlite/sqlite_aclk.c b/database/sqlite/sqlite_aclk.c index 1778a7c0bb..7758e97b65 100644 --- a/database/sqlite/sqlite_aclk.c +++ b/database/sqlite/sqlite_aclk.c @@ -257,7 +257,7 @@ static void sql_delete_aclk_table_list(char *host_guid) rc = db_execute(db_meta, buffer_tostring(sql)); if (unlikely(rc)) - error("Failed to drop unused ACLK tables"); + netdata_log_error("Failed to drop unused ACLK tables"); fail: buffer_free(sql); diff --git a/database/sqlite/sqlite_metadata.c b/database/sqlite/sqlite_metadata.c index 864e8809b2..697772bf51 100644 --- a/database/sqlite/sqlite_metadata.c +++ b/database/sqlite/sqlite_metadata.c @@ -950,7 +950,7 @@ static void cleanup_finished_threads(struct host_context_load_thread *hclt, size || (wait && __atomic_load_n(&(hclt[index].busy), __ATOMIC_ACQUIRE))) { int rc = uv_thread_join(&(hclt[index].thread)); if (rc) - error("Failed to join thread, rc = %d",rc); + netdata_log_error("Failed to join thread, rc = %d",rc); __atomic_store_n(&(hclt[index].busy), false, __ATOMIC_RELEASE); __atomic_store_n(&(hclt[index].finished), false, __ATOMIC_RELEASE); } @@ -1244,21 +1244,21 @@ static void metadata_event_loop(void *arg) loop = wc->loop = mallocz(sizeof(uv_loop_t)); ret = uv_loop_init(loop); if (ret) { - error("uv_loop_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_loop_init(): %s", uv_strerror(ret)); goto error_after_loop_init; } loop->data = wc; ret = uv_async_init(wc->loop, &wc->async, async_cb); if (ret) { - error("uv_async_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_async_init(): %s", uv_strerror(ret)); goto error_after_async_init; } wc->async.data = wc; ret = uv_timer_init(loop, &wc->timer_req); if (ret) { - error("uv_timer_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_timer_init(): %s", uv_strerror(ret)); goto error_after_timer_init; } wc->timer_req.data = wc; diff --git a/exporting/aws_kinesis/aws_kinesis.c b/exporting/aws_kinesis/aws_kinesis.c index 9aa7e1aa1a..c3e99951a8 100644 --- a/exporting/aws_kinesis/aws_kinesis.c +++ b/exporting/aws_kinesis/aws_kinesis.c @@ -54,7 +54,8 @@ int init_aws_kinesis_instance(struct instance *instance) instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters); 
     if (!instance->buffer) {
-        error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s", instance->config.name);
+        netdata_log_error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s",
+                          instance->config.name);
         return 1;
     }
     if (uv_mutex_init(&instance->mutex))
@@ -72,7 +73,7 @@ int init_aws_kinesis_instance(struct instance *instance)
     instance->connector_specific_data = (void *)connector_specific_data;
     if (!strcmp(connector_specific_config->stream_name, "")) {
-        error("stream name is a mandatory Kinesis parameter but it is not configured");
+        netdata_log_error("stream name is a mandatory Kinesis parameter but it is not configured");
         return 1;
     }
@@ -174,10 +175,11 @@ void aws_kinesis_connector_worker(void *instance_p)
         if (unlikely(kinesis_get_result(
                 connector_specific_data->request_outcomes, error_message, &sent_bytes, &lost_bytes))) {
             // oops! we couldn't send (all or some of the) data
-            error("EXPORTING: %s", error_message);
-            error(
-                "EXPORTING: failed to write data to external database '%s'. Willing to write %zu bytes, wrote %zu bytes.",
-                instance->config.destination, sent_bytes, sent_bytes - lost_bytes);
+            netdata_log_error("EXPORTING: %s", error_message);
+            netdata_log_error("EXPORTING: failed to write data to external database '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+                              instance->config.destination,
+                              sent_bytes,
+                              sent_bytes - lost_bytes);
             stats->transmission_failures++;
             stats->data_lost_events++;
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
index 50bad99429..f42a36e921 100644
--- a/exporting/exporting_engine.c
+++ b/exporting/exporting_engine.c
@@ -183,7 +183,7 @@ void *exporting_main(void *ptr)
     }
     if (init_connectors(engine) != 0) {
-        error("EXPORTING: cannot initialize exporting connectors");
+        netdata_log_error("EXPORTING: cannot initialize exporting connectors");
         send_statistics("EXPORTING_START", "FAIL", "-");
         goto cleanup;
     }
diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h
index c04bbeec36..fb436e9338 100644
--- a/exporting/exporting_engine.h
+++ b/exporting/exporting_engine.h
@@ -307,7 +307,7 @@ static inline void disable_instance(struct instance *instance)
     instance->disabled = 1;
     instance->scheduled = 0;
     uv_mutex_unlock(&instance->mutex);
-    error("EXPORTING: Instance %s disabled", instance->config.name);
+    netdata_log_error("EXPORTING: Instance %s disabled", instance->config.name);
 }
 #include "exporting/prometheus/prometheus.h"
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
index 34b2509c9e..254db982e5 100644
--- a/exporting/graphite/graphite.c
+++ b/exporting/graphite/graphite.c
@@ -49,7 +49,7 @@ int init_graphite_instance(struct instance *instance)
     instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
     if (!instance->buffer) {
-        error("EXPORTING: cannot create buffer for graphite exporting connector instance %s", instance->config.name);
+        netdata_log_error("EXPORTING: cannot create buffer for graphite exporting connector instance %s", instance->config.name);
         return 1;
     }
diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c
index 15e1951f8f..5167a68c2d 100644
--- a/exporting/init_connectors.c
+++ b/exporting/init_connectors.c
@@ -85,14 +85,14 @@ int init_connectors(struct engine *engine)
 #endif
             break;
         default:
-            error("EXPORTING: unknown exporting connector type");
+            netdata_log_error("EXPORTING: unknown exporting connector type");
             return 1;
     }
     // dispatch the instance
worker thread int error = uv_thread_create(&instance->thread, instance->worker, instance); if (error) { - error("EXPORTING: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error)); + netdata_log_error("EXPORTING: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error)); return 1; } char threadname[NETDATA_THREAD_NAME_MAX + 1]; @@ -113,7 +113,7 @@ static size_t base64_encode(unsigned char *input, size_t input_size, char *outpu "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; if ((input_size / 3 + 1) * 4 >= output_size) { - error("Output buffer for encoding size=%zu is not large enough for %zu-bytes input", output_size, input_size); + netdata_log_error("Output buffer for encoding size=%zu is not large enough for %zu-bytes input", output_size, input_size); return 0; } size_t count = 0; @@ -123,7 +123,7 @@ static size_t base64_encode(unsigned char *input, size_t input_size, char *outpu output[1] = lookup[(value >> 12) & 0x3f]; output[2] = lookup[(value >> 6) & 0x3f]; output[3] = lookup[value & 0x3f]; - //error("Base-64 encode (%04x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]); + //netdata_log_error("Base-64 encode (%04x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]); output += 4; input += 3; input_size -= 3; @@ -136,7 +136,7 @@ static size_t base64_encode(unsigned char *input, size_t input_size, char *outpu output[1] = lookup[(value >> 6) & 0x3f]; output[2] = lookup[value & 0x3f]; output[3] = '='; - //error("Base-64 encode (%06x) -> %c %c %c %c\n", (value>>2)&0xffff, output[0], output[1], output[2], output[3]); + //netdata_log_error("Base-64 encode (%06x) -> %c %c %c %c\n", (value>>2)&0xffff, output[0], output[1], output[2], output[3]); count += 4; output[4] = '\0'; break; @@ -146,7 +146,7 @@ static size_t base64_encode(unsigned char *input, size_t input_size, char *outpu output[1] = lookup[value & 0x3f]; output[2] = '='; output[3] = '='; - //error("Base-64 encode (%06x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]); + //netdata_log_error("Base-64 encode (%06x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]); count += 4; output[4] = '\0'; break; diff --git a/exporting/json/json.c b/exporting/json/json.c index 4155a7c401..d916fe7741 100644 --- a/exporting/json/json.c +++ b/exporting/json/json.c @@ -39,7 +39,7 @@ int init_json_instance(struct instance *instance) instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters); if (!instance->buffer) { - error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name); + netdata_log_error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name); return 1; } diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c index 9a5ae3fc2e..bd7541a9d0 100644 --- a/exporting/mongodb/mongodb.c +++ b/exporting/mongodb/mongodb.c @@ -18,21 +18,22 @@ int mongodb_init(struct instance *instance) bson_error_t bson_error; if (unlikely(!connector_specific_config->collection || !*connector_specific_config->collection)) { - error("EXPORTING: collection name is a mandatory MongoDB parameter, but it is not configured"); + netdata_log_error("EXPORTING: collection name is a mandatory MongoDB parameter, but it is not configured"); return 1; } uri = mongoc_uri_new_with_error(instance->config.destination, &bson_error); if (unlikely(!uri)) { - error( - "EXPORTING: failed to parse URI: %s. 
Error message: %s", instance->config.destination, bson_error.message); + netdata_log_error("EXPORTING: failed to parse URI: %s. Error message: %s", + instance->config.destination, + bson_error.message); return 1; } int32_t socket_timeout = mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, instance->config.timeoutms); if (!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) { - error("EXPORTING: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout); + netdata_log_error("EXPORTING: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout); return 1; }; @@ -41,12 +42,12 @@ int mongodb_init(struct instance *instance) connector_specific_data->client = mongoc_client_new_from_uri(uri); if (unlikely(!connector_specific_data->client)) { - error("EXPORTING: failed to create a new client"); + netdata_log_error("EXPORTING: failed to create a new client"); return 1; } if (!mongoc_client_set_appname(connector_specific_data->client, "netdata")) { - error("EXPORTING: failed to set client appname"); + netdata_log_error("EXPORTING: failed to set client appname"); }; connector_specific_data->collection = mongoc_client_get_collection( @@ -108,7 +109,8 @@ int init_mongodb_instance(struct instance *instance) instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters); if (!instance->buffer) { - error("EXPORTING: cannot create buffer for MongoDB exporting connector instance %s", instance->config.name); + netdata_log_error("EXPORTING: cannot create buffer for MongoDB exporting connector instance %s", + instance->config.name); return 1; } if (uv_mutex_init(&instance->mutex)) @@ -128,7 +130,7 @@ int init_mongodb_instance(struct instance *instance) } if (unlikely(mongodb_init(instance))) { - error("EXPORTING: cannot initialize MongoDB exporting connector"); + netdata_log_error("EXPORTING: cannot initialize MongoDB exporting connector"); return 1; } @@ -195,7 +197,7 @@ int format_batch_mongodb(struct instance *instance) insert[documents_inserted] = bson_new_from_json((const uint8_t *)start, -1, &bson_error); if (unlikely(!insert[documents_inserted])) { - error( + netdata_log_error( "EXPORTING: Failed creating a BSON document from a JSON string \"%s\" : %s", start, bson_error.message); free_bson(insert, documents_inserted); return 1; @@ -350,8 +352,8 @@ void mongodb_connector_worker(void *instance_p) stats->receptions++; } else { // oops! we couldn't send (all or some of the) data - error("EXPORTING: %s", bson_error.message); - error( + netdata_log_error("EXPORTING: %s", bson_error.message); + netdata_log_error( "EXPORTING: failed to write data to the database '%s'. 
" "Willing to write %zu bytes, wrote %zu bytes.", instance->config.destination, data_size, 0UL); diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c index 3362a335af..ffccb5b223 100644 --- a/exporting/opentsdb/opentsdb.c +++ b/exporting/opentsdb/opentsdb.c @@ -46,7 +46,7 @@ int init_opentsdb_telnet_instance(struct instance *instance) instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters); if (!instance->buffer) { - error("EXPORTING: cannot create buffer for opentsdb telnet exporting connector instance %s", instance->config.name); + netdata_log_error("EXPORTING: cannot create buffer for opentsdb telnet exporting connector instance %s", instance->config.name); return 1; } @@ -102,7 +102,7 @@ int init_opentsdb_http_instance(struct instance *instance) instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters); if (!instance->buffer) { - error("EXPORTING: cannot create buffer for opentsdb HTTP exporting connector instance %s", instance->config.name); + netdata_log_error("EXPORTING: cannot create buffer for opentsdb HTTP exporting connector instance %s", instance->config.name); return 1; } diff --git a/exporting/process_data.c b/exporting/process_data.c index 88d0b56a11..e36c5fa993 100644 --- a/exporting/process_data.c +++ b/exporting/process_data.c @@ -170,7 +170,7 @@ void start_batch_formatting(struct engine *engine) if (instance->scheduled) { uv_mutex_lock(&instance->mutex); if (instance->start_batch_formatting && instance->start_batch_formatting(instance) != 0) { - error("EXPORTING: cannot start batch formatting for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot start batch formatting for %s", instance->config.name); disable_instance(instance); } } @@ -189,7 +189,7 @@ void start_host_formatting(struct engine *engine, RRDHOST *host) if (instance->scheduled) { if (rrdhost_is_exportable(instance, host)) { if (instance->start_host_formatting && instance->start_host_formatting(instance, host) != 0) { - error("EXPORTING: cannot start host formatting for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot start host formatting for %s", instance->config.name); disable_instance(instance); } } else { @@ -211,7 +211,7 @@ void start_chart_formatting(struct engine *engine, RRDSET *st) if (instance->scheduled && !instance->skip_host) { if (rrdset_is_exportable(instance, st)) { if (instance->start_chart_formatting && instance->start_chart_formatting(instance, st) != 0) { - error("EXPORTING: cannot start chart formatting for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot start chart formatting for %s", instance->config.name); disable_instance(instance); } } else { @@ -232,7 +232,7 @@ void metric_formatting(struct engine *engine, RRDDIM *rd) for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { if (instance->scheduled && !instance->skip_host && !instance->skip_chart) { if (instance->metric_formatting && instance->metric_formatting(instance, rd) != 0) { - error("EXPORTING: cannot format metric for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot format metric for %s", instance->config.name); disable_instance(instance); continue; } @@ -252,7 +252,7 @@ void end_chart_formatting(struct engine *engine, RRDSET *st) for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { if (instance->scheduled && !instance->skip_host && !instance->skip_chart) { if 
(instance->end_chart_formatting && instance->end_chart_formatting(instance, st) != 0) { - error("EXPORTING: cannot end chart formatting for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot end chart formatting for %s", instance->config.name); disable_instance(instance); continue; } @@ -271,8 +271,8 @@ void variables_formatting(struct engine *engine, RRDHOST *host) { for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { if (instance->scheduled && !instance->skip_host && should_send_variables(instance)) { - if (instance->variables_formatting && instance->variables_formatting(instance, host) != 0){ - error("EXPORTING: cannot format variables for %s", instance->config.name); + if (instance->variables_formatting && instance->variables_formatting(instance, host) != 0){ + netdata_log_error("EXPORTING: cannot format variables for %s", instance->config.name); disable_instance(instance); continue; } @@ -293,7 +293,7 @@ void end_host_formatting(struct engine *engine, RRDHOST *host) for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { if (instance->scheduled && !instance->skip_host) { if (instance->end_host_formatting && instance->end_host_formatting(instance, host) != 0) { - error("EXPORTING: cannot end host formatting for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot end host formatting for %s", instance->config.name); disable_instance(instance); continue; } @@ -312,7 +312,7 @@ void end_batch_formatting(struct engine *engine) for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { if (instance->scheduled) { if (instance->end_batch_formatting && instance->end_batch_formatting(instance) != 0) { - error("EXPORTING: cannot end batch formatting for %s", instance->config.name); + netdata_log_error("EXPORTING: cannot end batch formatting for %s", instance->config.name); disable_instance(instance); continue; } diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c index 39b977312e..b48095d16d 100644 --- a/exporting/prometheus/remote_write/remote_write.c +++ b/exporting/prometheus/remote_write/remote_write.c @@ -386,7 +386,7 @@ int format_batch_prometheus_remote_write(struct instance *instance) size_t data_size = get_write_request_size(connector_specific_data->write_request); if (unlikely(!data_size)) { - error("EXPORTING: write request size is out of range"); + netdata_log_error("EXPORTING: write request size is out of range"); return 1; } @@ -394,7 +394,7 @@ int format_batch_prometheus_remote_write(struct instance *instance) buffer_need_bytes(buffer, data_size); if (unlikely(pack_and_clear_write_request(connector_specific_data->write_request, buffer->buffer, &data_size))) { - error("EXPORTING: cannot pack write request"); + netdata_log_error("EXPORTING: cannot pack write request"); return 1; } buffer->len = data_size; diff --git a/exporting/pubsub/pubsub.c b/exporting/pubsub/pubsub.c index b889801021..5e67b5e065 100644 --- a/exporting/pubsub/pubsub.c +++ b/exporting/pubsub/pubsub.c @@ -32,7 +32,7 @@ int init_pubsub_instance(struct instance *instance) instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters); if (!instance->buffer) { - error("EXPORTING: cannot create buffer for Pub/Sub exporting connector instance %s", instance->config.name); + netdata_log_error("EXPORTING: cannot create buffer for Pub/Sub exporting connector instance %s", 
instance->config.name); return 1; } uv_mutex_init(&instance->mutex); @@ -48,7 +48,7 @@ int init_pubsub_instance(struct instance *instance) (void *)connector_specific_data, error_message, instance->config.destination, connector_specific_config->credentials_file, connector_specific_config->project_id, connector_specific_config->topic_id)) { - error( + netdata_log_error( "EXPORTING: Cannot initialize a Pub/Sub publisher for instance %s: %s", instance->config.name, error_message); return 1; @@ -132,7 +132,7 @@ void pubsub_connector_worker(void *instance_p) stats->buffered_bytes = buffer_len; if (pubsub_add_message(instance->connector_specific_data, (char *)buffer_tostring(buffer))) { - error("EXPORTING: Instance %s: Cannot add data to a message", instance->config.name); + netdata_log_error("EXPORTING: Instance %s: Cannot add data to a message", instance->config.name); stats->data_lost_events++; stats->lost_metrics += stats->buffered_metrics; @@ -146,7 +146,7 @@ void pubsub_connector_worker(void *instance_p) connector_specific_config->project_id, connector_specific_config->topic_id, buffer_len); if (pubsub_publish((void *)connector_specific_data, error_message, stats->buffered_metrics, buffer_len)) { - error("EXPORTING: Instance: %s: Cannot publish a message: %s", instance->config.name, error_message); + netdata_log_error("EXPORTING: Instance: %s: Cannot publish a message: %s", instance->config.name, error_message); stats->transmission_failures++; stats->data_lost_events++; @@ -164,8 +164,8 @@ void pubsub_connector_worker(void *instance_p) if (unlikely(pubsub_get_result( connector_specific_data, error_message, &sent_metrics, &sent_bytes, &lost_metrics, &lost_bytes))) { // oops! we couldn't send (all or some of the) data - error("EXPORTING: %s", error_message); - error( + netdata_log_error("EXPORTING: %s", error_message); + netdata_log_error( "EXPORTING: failed to write data to service '%s'. 
Willing to write %zu bytes, wrote %zu bytes.", instance->config.destination, lost_bytes, sent_bytes); diff --git a/exporting/read_config.c b/exporting/read_config.c index c1cdfd26db..210ba3c667 100644 --- a/exporting/read_config.c +++ b/exporting/read_config.c @@ -176,7 +176,7 @@ inline EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EX exporting_options |= EXPORTING_SOURCE_DATA_SUM; exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_SUM); } else { - error("EXPORTING: invalid data data_source method '%s'.", data_source); + netdata_log_error("EXPORTING: invalid data data_source method '%s'.", data_source); } return exporting_options; @@ -316,34 +316,34 @@ struct engine *read_exporting_config() netdata_log_info("Instance %s on %s", tmp_ci_list->local_ci.instance_name, tmp_ci_list->local_ci.connector_name); if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) { - error("Unknown exporting connector type"); + netdata_log_error("Unknown exporting connector type"); goto next_connector_instance; } #ifndef ENABLE_PROMETHEUS_REMOTE_WRITE if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) { - error("Prometheus Remote Write support isn't compiled"); + netdata_log_error("Prometheus Remote Write support isn't compiled"); goto next_connector_instance; } #endif #ifndef HAVE_KINESIS if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_KINESIS) { - error("AWS Kinesis support isn't compiled"); + netdata_log_error("AWS Kinesis support isn't compiled"); goto next_connector_instance; } #endif #ifndef ENABLE_EXPORTING_PUBSUB if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_PUBSUB) { - error("Google Cloud Pub/Sub support isn't compiled"); + netdata_log_error("Google Cloud Pub/Sub support isn't compiled"); goto next_connector_instance; } #endif #ifndef HAVE_MONGOC if (tmp_ci_list->exporting_type == EXPORTING_CONNECTOR_TYPE_MONGODB) { - error("MongoDB support isn't compiled"); + netdata_log_error("MongoDB support isn't compiled"); goto next_connector_instance; } #endif diff --git a/exporting/send_data.c b/exporting/send_data.c index a58a850b12..5afcb754cb 100644 --- a/exporting/send_data.c +++ b/exporting/send_data.c @@ -96,14 +96,14 @@ void simple_connector_receive_response(int *sock, struct instance *instance) stats->receptions++; } else if (r == 0) { - error("EXPORTING: '%s' closed the socket", instance->config.destination); + netdata_log_error("EXPORTING: '%s' closed the socket", instance->config.destination); close(*sock); *sock = -1; } else { // failed to receive data if (errno != EAGAIN && errno != EWOULDBLOCK) { - error("EXPORTING: cannot receive data from '%s'.", instance->config.destination); + netdata_log_error("EXPORTING: cannot receive data from '%s'.", instance->config.destination); } } @@ -182,7 +182,7 @@ void simple_connector_send_buffer( buffer_flush(buffer); } else { // oops! we couldn't send (all or some of the) data - error( + netdata_log_error( "EXPORTING: failed to write data to '%s'. Willing to write %zu bytes, wrote %zd bytes. 
Will re-connect.", instance->config.destination, buffer_len, @@ -299,7 +299,7 @@ void simple_connector_worker(void *instance_p) if (exporting_tls_is_enabled(instance->config.type, options) && sock != -1) { if (netdata_ssl_exporting_ctx) { if (sock_delnonblock(sock) < 0) - error("Exporting cannot remove the non-blocking flag from socket %d", sock); + netdata_log_error("Exporting cannot remove the non-blocking flag from socket %d", sock); if(netdata_ssl_open(&connector_specific_data->ssl, netdata_ssl_exporting_ctx, sock)) { if(netdata_ssl_connect(&connector_specific_data->ssl)) { @@ -313,7 +313,7 @@ void simple_connector_worker(void *instance_p) tv.tv_sec = 2; if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv, sizeof(tv))) - error("Cannot set timeout to socket %d, this can block communication", sock); + netdata_log_error("Cannot set timeout to socket %d, this can block communication", sock); } } } @@ -340,7 +340,7 @@ void simple_connector_worker(void *instance_p) connector_specific_data->buffer, buffered_metrics); } else { - error("EXPORTING: failed to update '%s'", instance->config.destination); + netdata_log_error("EXPORTING: failed to update '%s'", instance->config.destination); stats->transmission_failures++; // increment the counter we check for data loss diff --git a/health/health.c b/health/health.c index e3ad833e73..3a206afcc9 100644 --- a/health/health.c +++ b/health/health.c @@ -289,20 +289,20 @@ static void health_silencers_init(void) { json_parse(str, NULL, health_silencers_json_read_callback); netdata_log_info("Parsed health silencers file %s", silencers_filename); } else { - error("Cannot read the data from health silencers file %s", silencers_filename); + netdata_log_error("Cannot read the data from health silencers file %s", silencers_filename); } freez(str); } } else { - error( - "Health silencers file %s has the size %" PRId64 " that is out of range[ 1 , %d ]. Aborting read.", - silencers_filename, - (int64_t)length, - HEALTH_SILENCERS_MAX_FILE_LEN); + netdata_log_error("Health silencers file %s has the size %" PRId64 " that is out of range[ 1 , %d ]. Aborting read.", + silencers_filename, + (int64_t)length, + HEALTH_SILENCERS_MAX_FILE_LEN); } fclose(fd); } else { - netdata_log_info("Cannot open the file %s, so Netdata will work with the default health configuration.",silencers_filename); + netdata_log_info("Cannot open the file %s, so Netdata will work with the default health configuration.", + silencers_filename); } } @@ -589,7 +589,7 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { enqueue_alarm_notify_in_progress(ae); health_alarm_log_save(host, ae); } else { - error("Failed to format command arguments"); + netdata_log_error("Failed to format command arguments"); } buffer_free(wb); @@ -803,7 +803,10 @@ static void initialize_health(RRDHOST *host) long n = config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", host->health_log.max); if(n < 10) { - error("Host '%s': health configuration has invalid max log entries %ld. Using default %u", rrdhost_hostname(host), n, host->health_log.max); + netdata_log_error("Host '%s': health configuration has invalid max log entries %ld. 
Using default %u", + rrdhost_hostname(host), + n, + host->health_log.max); config_set_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", (long)host->health_log.max); } else diff --git a/health/health_config.c b/health/health_config.c index 92c26d1f6f..a900f67eac 100644 --- a/health/health_config.c +++ b/health/health_config.c @@ -61,36 +61,36 @@ static inline int health_parse_delay( if(!strcasecmp(key, "up")) { if (!config_parse_duration(value, delay_up_duration)) { - error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", - line, filename, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); } else given_up = 1; } else if(!strcasecmp(key, "down")) { if (!config_parse_duration(value, delay_down_duration)) { - error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", - line, filename, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); } else given_down = 1; } else if(!strcasecmp(key, "multiplier")) { *delay_multiplier = strtof(value, NULL); if(isnan(*delay_multiplier) || isinf(*delay_multiplier) || islessequal(*delay_multiplier, 0)) { - error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", - line, filename, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); } else given_multiplier = 1; } else if(!strcasecmp(key, "max")) { if (!config_parse_duration(value, delay_max_duration)) { - error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", - line, filename, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, filename, value, key); } else given_max = 1; } else { - error("Health configuration at line %zu of file '%s': unknown keyword '%s'", - line, filename, key); + netdata_log_error("Health configuration at line %zu of file '%s': unknown keyword '%s'", + line, filename, key); } } @@ -136,7 +136,7 @@ static inline uint32_t health_parse_options(const char *s) { if(!strcasecmp(buf, "no-clear-notification") || !strcasecmp(buf, "no-clear")) options |= RRDCALC_OPTION_NO_CLEAR_NOTIFICATION; else - error("Ignoring unknown alarm option '%s'", buf); + netdata_log_error("Ignoring unknown alarm option '%s'", buf); } } @@ -171,14 +171,14 @@ static inline int health_parse_repeat( } if(!strcasecmp(key, "warning")) { if (!config_parse_duration(value, (int*)warn_repeat_every)) { - error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", - line, file, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, file, value, key); } } else if(!strcasecmp(key, "critical")) { if (!config_parse_duration(value, (int*)crit_repeat_every)) { - error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", - line, file, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", + line, file, value, key); } } } @@ -326,14 +326,14 @@ static inline int health_parse_db_lookup( while(*s && !isspace(*s)) s++; while(*s && isspace(*s)) *s++ = '\0'; if(!*s) { - error("Health configuration invalid chart calculation at line %zu 
of file '%s': expected group method followed by the 'after' time, but got '%s'", - line, filename, key); + netdata_log_error("Health configuration invalid chart calculation at line %zu of file '%s': expected group method followed by the 'after' time, but got '%s'", + line, filename, key); return 0; } if((*group_method = time_grouping_parse(key, RRDR_GROUPING_UNDEFINED)) == RRDR_GROUPING_UNDEFINED) { - error("Health configuration at line %zu of file '%s': invalid group method '%s'", - line, filename, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid group method '%s'", + line, filename, key); return 0; } @@ -343,8 +343,8 @@ static inline int health_parse_db_lookup( while(*s && isspace(*s)) *s++ = '\0'; if(!config_parse_duration(key, after)) { - error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method", - line, filename, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method", + line, filename, key); return 0; } @@ -364,8 +364,8 @@ static inline int health_parse_db_lookup( while(*s && isspace(*s)) *s++ = '\0'; if (!config_parse_duration(value, before)) { - error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", - line, filename, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", + line, filename, value, key); } } else if(!strcasecmp(key, HEALTH_EVERY_KEY)) { @@ -374,8 +374,8 @@ static inline int health_parse_db_lookup( while(*s && isspace(*s)) *s++ = '\0'; if (!config_parse_duration(value, every)) { - error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", - line, filename, value, key); + netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", + line, filename, value, key); } } else if(!strcasecmp(key, "absolute") || !strcasecmp(key, "abs") || !strcasecmp(key, "absolute_sum")) { @@ -422,8 +422,8 @@ static inline int health_parse_db_lookup( break; } else { - error("Health configuration at line %zu of file '%s': unknown keyword '%s'", - line, filename, key); + netdata_log_error("Health configuration at line %zu of file '%s': unknown keyword '%s'", + line, filename, key); } } @@ -574,7 +574,7 @@ static int health_readfile(const char *filename, void *data) { FILE *fp = fopen(filename, "r"); if(!fp) { - error("Health configuration cannot read file '%s'.", filename); + netdata_log_error("Health configuration cannot read file '%s'.", filename); return 0; } @@ -598,7 +598,8 @@ static int health_readfile(const char *filename, void *data) { if(append < HEALTH_CONF_MAX_LINE) continue; else { - error("Health configuration has too long multi-line at line %zu of file '%s'.", line, filename); + netdata_log_error("Health configuration has too long multi-line at line %zu of file '%s'.", + line, filename); } } append = 0; @@ -606,7 +607,8 @@ static int health_readfile(const char *filename, void *data) { char *key = s; while(*s && *s != ':') s++; if(!*s) { - error("Health configuration has invalid line %zu of file '%s'. It does not contain a ':'. Ignoring it.", line, filename); + netdata_log_error("Health configuration has invalid line %zu of file '%s'. It does not contain a ':'. 
Ignoring it.", + line, filename); continue; } *s = '\0'; @@ -617,12 +619,14 @@ static int health_readfile(const char *filename, void *data) { value = trim_all(value); if(!key) { - error("Health configuration has invalid line %zu of file '%s'. Keyword is empty. Ignoring it.", line, filename); + netdata_log_error("Health configuration has invalid line %zu of file '%s'. Keyword is empty. Ignoring it.", + line, filename); continue; } if(!value) { - error("Health configuration has invalid line %zu of file '%s'. value is empty. Ignoring it.", line, filename); + netdata_log_error("Health configuration has invalid line %zu of file '%s'. value is empty. Ignoring it.", + line, filename); continue; } @@ -654,7 +658,7 @@ static int health_readfile(const char *filename, void *data) { { char *tmp = strdupz(value); if(rrdvar_fix_name(tmp)) - error("Health configuration renamed alarm '%s' to '%s'", value, tmp); + netdata_log_error("Health configuration renamed alarm '%s' to '%s'", value, tmp); rc->name = string_strdupz(tmp); freez(tmp); @@ -704,7 +708,7 @@ static int health_readfile(const char *filename, void *data) { { char *tmp = strdupz(value); if(rrdvar_fix_name(tmp)) - error("Health configuration renamed template '%s' to '%s'", value, tmp); + netdata_log_error("Health configuration renamed template '%s' to '%s'", value, tmp); rt->name = string_strdupz(tmp); freez(tmp); @@ -766,8 +770,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->on = string_strdupz(value); if(rc->chart) { if(strcmp(rrdcalc_chart_name(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_chart_name(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_chart_name(rc), value, value); string_freez(rc->chart); } @@ -779,8 +783,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->classification = string_strdupz(value); if(rc->classification) { if(strcmp(rrdcalc_classification(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_classification(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_classification(rc), value, value); string_freez(rc->classification); } @@ -792,7 +796,7 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->component = string_strdupz(value); if(rc->component) { if(strcmp(rrdcalc_component(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", line, filename, rrdcalc_name(rc), key, rrdcalc_component(rc), value, value); string_freez(rc->component); @@ -805,8 +809,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->type = string_strdupz(value); if(rc->type) { if(strcmp(rrdcalc_type(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_type(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_type(rc), value, value); string_freez(rc->type); } @@ -834,8 +838,8 @@ static int health_readfile(const char *filename, void *data) { else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { alert_cfg->every = string_strdupz(value); if(!config_parse_duration(value, &rc->update_every)) - error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' cannot parse duration: '%s'.", - line, filename, rrdcalc_name(rc), key, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' cannot parse duration: '%s'.", + line, filename, rrdcalc_name(rc), key, value); alert_cfg->p_update_every = rc->update_every; } else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { @@ -843,8 +847,8 @@ static int health_readfile(const char *filename, void *data) { char *e; rc->green = str2ndd(value, &e); if(e && *e) { - error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", - line, filename, rrdcalc_name(rc), key, e); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rrdcalc_name(rc), key, e); } } else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { @@ -852,8 +856,8 @@ static int health_readfile(const char *filename, void *data) { char *e; rc->red = str2ndd(value, &e); if(e && *e) { - error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", - line, filename, rrdcalc_name(rc), key, e); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rrdcalc_name(rc), key, e); } } else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { @@ -862,8 +866,8 @@ static int health_readfile(const char *filename, void *data) { int error = 0; rc->calculation = expression_parse(value, &failed_at, &error); if(!rc->calculation) { - error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, filename, rrdcalc_name(rc), key, value, expression_strerror(error), failed_at); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rrdcalc_name(rc), key, value, expression_strerror(error), failed_at); } parse_variables_and_store_in_health_rrdvars(value, HEALTH_CONF_MAX_LINE); } @@ -873,8 +877,8 @@ static int health_readfile(const char *filename, void *data) { int error = 0; rc->warning = expression_parse(value, &failed_at, &error); if(!rc->warning) { - error("Health configuration at line %zu of file '%s' for alarm '%s' at key 
'%s' has unparse-able expression '%s': %s at '%s'", - line, filename, rrdcalc_name(rc), key, value, expression_strerror(error), failed_at); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rrdcalc_name(rc), key, value, expression_strerror(error), failed_at); } parse_variables_and_store_in_health_rrdvars(value, HEALTH_CONF_MAX_LINE); } @@ -884,8 +888,8 @@ static int health_readfile(const char *filename, void *data) { int error = 0; rc->critical = expression_parse(value, &failed_at, &error); if(!rc->critical) { - error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, filename, rrdcalc_name(rc), key, value, expression_strerror(error), failed_at); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rrdcalc_name(rc), key, value, expression_strerror(error), failed_at); } parse_variables_and_store_in_health_rrdvars(value, HEALTH_CONF_MAX_LINE); } @@ -893,8 +897,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->exec = string_strdupz(value); if(rc->exec) { if(strcmp(rrdcalc_exec(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_exec(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_exec(rc), value, value); string_freez(rc->exec); } @@ -904,8 +908,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->to = string_strdupz(value); if(rc->recipient) { if(strcmp(rrdcalc_recipient(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_recipient(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_recipient(rc), value, value); string_freez(rc->recipient); } @@ -917,8 +921,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->units = string_strdupz(value); if(rc->units) { if(strcmp(rrdcalc_units(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_units(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_units(rc), value, value); string_freez(rc->units); } @@ -930,8 +934,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->info = string_strdupz(value); if(rc->info) { if(strcmp(rrdcalc_info(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", - line, filename, rrdcalc_name(rc), key, rrdcalc_info(rc), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalc_name(rc), key, rrdcalc_info(rc), value, value); string_freez(rc->info); string_freez(rc->original_info); @@ -957,8 +961,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->host_labels = string_strdupz(value); if(rc->host_labels) { if(strcmp(rrdcalc_host_labels(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.", - line, filename, rrdcalc_name(rc), key, value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.", + line, filename, rrdcalc_name(rc), key, value, value); string_freez(rc->host_labels); simple_pattern_free(rc->host_labels_pattern); @@ -992,8 +996,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->chart_labels = string_strdupz(value); if(rc->chart_labels) { if(strcmp(rrdcalc_chart_labels(rc), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.", - line, filename, rrdcalc_name(rc), key, value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.", + line, filename, rrdcalc_name(rc), key, value, value); string_freez(rc->chart_labels); simple_pattern_free(rc->chart_labels_pattern); @@ -1010,8 +1014,8 @@ static int health_readfile(const char *filename, void *data) { true); } else { - error("Health configuration at line %zu of file '%s' for alarm '%s' has unknown key '%s'.", - line, filename, rrdcalc_name(rc), key); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has unknown key '%s'.", + line, filename, rrdcalc_name(rc), key); } } else if(rt) { @@ -1019,8 +1023,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->on = string_strdupz(value); if(rt->context) { if(strcmp(string2str(rt->context), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, string2str(rt->context), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, string2str(rt->context), value, value); string_freez(rt->context); } @@ -1032,8 +1036,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->classification = string_strdupz(value); if(rt->classification) { if(strcmp(rrdcalctemplate_classification(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_classification(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_classification(rt), value, value); string_freez(rt->classification); } @@ -1045,8 +1049,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->component = string_strdupz(value); if(rt->component) { if(strcmp(rrdcalctemplate_component(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_component(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_component(rt), value, value); string_freez(rt->component); } @@ -1058,8 +1062,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->type = string_strdupz(value); if(rt->type) { if(strcmp(rrdcalctemplate_type(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_type(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_type(rt), value, value); string_freez(rt->type); } @@ -1125,8 +1129,8 @@ static int health_readfile(const char *filename, void *data) { else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { alert_cfg->every = string_strdupz(value); if(!config_parse_duration(value, &rt->update_every)) - error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' cannot parse duration: '%s'.", - line, filename, rrdcalctemplate_name(rt), key, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' cannot parse duration: '%s'.", + line, filename, rrdcalctemplate_name(rt), key, value); alert_cfg->p_update_every = rt->update_every; } else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { @@ -1134,8 +1138,8 @@ static int health_readfile(const char *filename, void *data) { char *e; rt->green = str2ndd(value, &e); if(e && *e) { - error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", - line, filename, rrdcalctemplate_name(rt), key, e); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rrdcalctemplate_name(rt), key, e); } } else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { @@ -1143,8 +1147,8 @@ static int health_readfile(const char *filename, void *data) { char *e; rt->red = str2ndd(value, &e); if(e && *e) { - error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", - line, filename, rrdcalctemplate_name(rt), key, e); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' leaves this string unmatched: '%s'.", + line, filename, rrdcalctemplate_name(rt), key, e); } } else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { @@ -1153,8 +1157,8 @@ static int health_readfile(const char *filename, 
void *data) { int error = 0; rt->calculation = expression_parse(value, &failed_at, &error); if(!rt->calculation) { - error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, filename, rrdcalctemplate_name(rt), key, value, expression_strerror(error), failed_at); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rrdcalctemplate_name(rt), key, value, expression_strerror(error), failed_at); } parse_variables_and_store_in_health_rrdvars(value, HEALTH_CONF_MAX_LINE); } @@ -1164,8 +1168,8 @@ static int health_readfile(const char *filename, void *data) { int error = 0; rt->warning = expression_parse(value, &failed_at, &error); if(!rt->warning) { - error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, filename, rrdcalctemplate_name(rt), key, value, expression_strerror(error), failed_at); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rrdcalctemplate_name(rt), key, value, expression_strerror(error), failed_at); } parse_variables_and_store_in_health_rrdvars(value, HEALTH_CONF_MAX_LINE); } @@ -1175,8 +1179,8 @@ static int health_readfile(const char *filename, void *data) { int error = 0; rt->critical = expression_parse(value, &failed_at, &error); if(!rt->critical) { - error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", - line, filename, rrdcalctemplate_name(rt), key, value, expression_strerror(error), failed_at); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' has unparse-able expression '%s': %s at '%s'", + line, filename, rrdcalctemplate_name(rt), key, value, expression_strerror(error), failed_at); } parse_variables_and_store_in_health_rrdvars(value, HEALTH_CONF_MAX_LINE); } @@ -1184,8 +1188,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->exec = string_strdupz(value); if(rt->exec) { if(strcmp(rrdcalctemplate_exec(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_exec(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_exec(rt), value, value); string_freez(rt->exec); } @@ -1195,8 +1199,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->to = string_strdupz(value); if(rt->recipient) { if(strcmp(rrdcalctemplate_recipient(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_recipient(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_recipient(rt), value, value); string_freez(rt->recipient); } @@ -1208,8 +1212,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->units = string_strdupz(value); if(rt->units) { if(strcmp(rrdcalctemplate_units(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_units(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_units(rt), value, value); string_freez(rt->units); } @@ -1221,8 +1225,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->info = string_strdupz(value); if(rt->info) { if(strcmp(rrdcalctemplate_info(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_info(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_info(rt), value, value); string_freez(rt->info); } @@ -1246,8 +1250,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->host_labels = string_strdupz(value); if(rt->host_labels) { if(strcmp(rrdcalctemplate_host_labels(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_host_labels(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_host_labels(rt), value, value); string_freez(rt->host_labels); simple_pattern_free(rt->host_labels_pattern); @@ -1266,8 +1270,8 @@ static int health_readfile(const char *filename, void *data) { alert_cfg->chart_labels = string_strdupz(value); if(rt->chart_labels) { if(strcmp(rrdcalctemplate_chart_labels(rt), value) != 0) - error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", - line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_chart_labels(rt), value, value); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", + line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_chart_labels(rt), value, value); string_freez(rt->chart_labels); simple_pattern_free(rt->chart_labels_pattern); @@ -1284,13 +1288,13 @@ static int health_readfile(const char *filename, void *data) { SIMPLE_PATTERN_EXACT, true); } else { - error("Health configuration at line %zu of file '%s' for template '%s' has unknown key '%s'.", - line, filename, rrdcalctemplate_name(rt), key); + netdata_log_error("Health configuration at line %zu of file '%s' for template '%s' has unknown key '%s'.", + line, filename, rrdcalctemplate_name(rt), key); } } else { - error("Health configuration at line %zu of file '%s' has unknown key '%s'. Expected either '" HEALTH_ALARM_KEY "' or '" HEALTH_TEMPLATE_KEY "'.", - line, filename, key); + netdata_log_error("Health configuration at line %zu of file '%s' has unknown key '%s'. Expected either '" HEALTH_ALARM_KEY "' or '" HEALTH_TEMPLATE_KEY "'.", + line, filename, key); } } diff --git a/libnetdata/aral/aral.c b/libnetdata/aral/aral.c index 823df40598..16328db69f 100644 --- a/libnetdata/aral/aral.c +++ b/libnetdata/aral/aral.c @@ -194,7 +194,7 @@ static void aral_delete_leftover_files(const char *name, const char *path, const snprintfz(full_path, FILENAME_MAX, "%s/%s", path, de->d_name); netdata_log_info("ARAL: '%s' removing left-over file '%s'", name, full_path); if(unlikely(unlink(full_path) == -1)) - error("ARAL: '%s' cannot delete file '%s'", name, full_path); + netdata_log_error("ARAL: '%s' cannot delete file '%s'", name, full_path); } closedir(dir); @@ -324,7 +324,7 @@ void aral_del_page___no_lock_needed(ARAL *ar, ARAL_PAGE *page TRACE_ALLOCATIONS_ netdata_munmap(page->data, page->size); if (unlikely(unlink(page->filename) == 1)) - error("Cannot delete file '%s'", page->filename); + netdata_log_error("Cannot delete file '%s'", page->filename); freez((void *)page->filename); @@ -764,7 +764,7 @@ ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_ele ar->config.initial_page_elements = 2; if(ar->config.mmap.enabled && (!ar->config.mmap.cache_dir || !*ar->config.mmap.cache_dir)) { - error("ARAL: '%s' mmap cache directory is not configured properly, disabling mmap.", ar->config.name); + netdata_log_error("ARAL: '%s' mmap cache directory is not configured properly, disabling mmap.", ar->config.name); ar->config.mmap.enabled = false; internal_fatal(true, "ARAL: '%s' mmap cache directory is not configured properly", ar->config.name); } diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c index 8d80c9fe0e..c54382abd3 100644 --- a/libnetdata/buffer/buffer.c +++ b/libnetdata/buffer/buffer.c @@ -367,8 +367,8 @@ static int buffer_expect(BUFFER *wb, const char *expected) { const char *generated = buffer_tostring(wb); if(strcmp(generated, expected) != 0) { - error("BUFFER: mismatch.\nGenerated:\n%s\nExpected:\n%s\n", - generated, expected); + netdata_log_error("BUFFER: mismatch.\nGenerated:\n%s\nExpected:\n%s\n", + generated, expected); return 1; } @@ -385,8 +385,8 @@ static int buffer_uint64_roundtrip(BUFFER *wb, NUMBER_ENCODING encoding, uint64_ uint64_t v = str2ull_encoded(buffer_tostring(wb)); if(v != value) { - error("BUFFER: string '%s' does resolves to %llu, expected %llu", - buffer_tostring(wb), (unsigned long long)v, (unsigned long long)value); + netdata_log_error("BUFFER: string '%s' does resolves to %llu, expected %llu", + buffer_tostring(wb), (unsigned long long)v, (unsigned long long)value); errors++; } buffer_flush(wb); @@ -403,8 +403,8 @@ 
static int buffer_int64_roundtrip(BUFFER *wb, NUMBER_ENCODING encoding, int64_t int64_t v = str2ll_encoded(buffer_tostring(wb)); if(v != value) { - error("BUFFER: string '%s' does resolves to %lld, expected %lld", - buffer_tostring(wb), (long long)v, (long long)value); + netdata_log_error("BUFFER: string '%s' does resolves to %lld, expected %lld", + buffer_tostring(wb), (long long)v, (long long)value); errors++; } buffer_flush(wb); @@ -421,8 +421,8 @@ static int buffer_double_roundtrip(BUFFER *wb, NUMBER_ENCODING encoding, NETDATA NETDATA_DOUBLE v = str2ndd_encoded(buffer_tostring(wb), NULL); if(v != value) { - error("BUFFER: string '%s' does resolves to %.12f, expected %.12f", - buffer_tostring(wb), v, value); + netdata_log_error("BUFFER: string '%s' does resolves to %.12f, expected %.12f", + buffer_tostring(wb), v, value); errors++; } buffer_flush(wb); diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c index 4a178b320d..806dc06a32 100644 --- a/libnetdata/clocks/clocks.c +++ b/libnetdata/clocks/clocks.c @@ -14,7 +14,7 @@ usec_t clock_realtime_resolution = 1000; inline int clock_gettime(clockid_t clk_id __maybe_unused, struct timespec *ts) { struct timeval tv; if(unlikely(gettimeofday(&tv, NULL) == -1)) { - error("gettimeofday() failed."); + netdata_log_error("gettimeofday() failed."); return -1; } ts->tv_sec = tv.tv_sec; @@ -79,7 +79,7 @@ void clocks_init(void) { inline time_t now_sec(clockid_t clk_id) { struct timespec ts; if(unlikely(clock_gettime(clk_id, &ts) == -1)) { - error("clock_gettime(%d, &timespec) failed.", clk_id); + netdata_log_error("clock_gettime(%d, &timespec) failed.", clk_id); return 0; } return ts.tv_sec; @@ -88,7 +88,7 @@ inline time_t now_sec(clockid_t clk_id) { inline usec_t now_usec(clockid_t clk_id) { struct timespec ts; if(unlikely(clock_gettime(clk_id, &ts) == -1)) { - error("clock_gettime(%d, &timespec) failed.", clk_id); + netdata_log_error("clock_gettime(%d, &timespec) failed.", clk_id); return 0; } return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC; @@ -98,7 +98,7 @@ inline int now_timeval(clockid_t clk_id, struct timeval *tv) { struct timespec ts; if(unlikely(clock_gettime(clk_id, &ts) == -1)) { - error("clock_gettime(%d, &timespec) failed.", clk_id); + netdata_log_error("clock_gettime(%d, &timespec) failed.", clk_id); tv->tv_sec = 0; tv->tv_usec = 0; return -1; @@ -200,30 +200,27 @@ void sleep_to_absolute_time(usec_t usec) { if (ret == EINVAL) { if (!einval_printed) { einval_printed++; - error( - "Invalid time given to clock_nanosleep(): clockid = %d, tv_sec = %lld, tv_nsec = %ld", - clock, - (long long)req.tv_sec, - req.tv_nsec); + netdata_log_error("Invalid time given to clock_nanosleep(): clockid = %d, tv_sec = %lld, tv_nsec = %ld", + clock, + (long long)req.tv_sec, + req.tv_nsec); } } else if (ret == ENOTSUP) { if (!enotsup_printed) { enotsup_printed++; - error( - "Invalid clock id given to clock_nanosleep(): clockid = %d, tv_sec = %lld, tv_nsec = %ld", - clock, - (long long)req.tv_sec, - req.tv_nsec); + netdata_log_error("Invalid clock id given to clock_nanosleep(): clockid = %d, tv_sec = %lld, tv_nsec = %ld", + clock, + (long long)req.tv_sec, + req.tv_nsec); } } else { if (!eunknown_printed) { eunknown_printed++; - error( - "Unknown return value %d from clock_nanosleep(): clockid = %d, tv_sec = %lld, tv_nsec = %ld", - ret, - clock, - (long long)req.tv_sec, - req.tv_nsec); + netdata_log_error("Unknown return value %d from clock_nanosleep(): clockid = %d, tv_sec = %lld, tv_nsec = %ld", + ret, + clock, + (long long)req.tv_sec, +
req.tv_nsec); } } sleep_usec(usec); @@ -384,7 +381,7 @@ void sleep_usec_with_now(usec_t usec, usec_t started_ut) { } } else { - error("Cannot nanosleep() for %llu microseconds.", usec); + netdata_log_error("Cannot nanosleep() for %llu microseconds.", usec); break; } } @@ -394,7 +391,7 @@ static inline collected_number uptime_from_boottime(void) { #ifdef CLOCK_BOOTTIME_IS_AVAILABLE return (collected_number)(now_boottime_usec() / USEC_PER_MS); #else - error("uptime cannot be read from CLOCK_BOOTTIME on this system."); + netdata_log_error("uptime cannot be read from CLOCK_BOOTTIME on this system."); return 0; #endif } @@ -410,11 +407,11 @@ static inline collected_number read_proc_uptime(char *filename) { if(unlikely(!read_proc_uptime_ff)) return 0; if(unlikely(procfile_lines(read_proc_uptime_ff) < 1)) { - error("/proc/uptime has no lines."); + netdata_log_error("/proc/uptime has no lines."); return 0; } if(unlikely(procfile_linewords(read_proc_uptime_ff, 0) < 1)) { - error("/proc/uptime has less than 1 word in it."); + netdata_log_error("/proc/uptime has less than 1 word in it."); return 0; } @@ -441,7 +438,7 @@ inline collected_number uptime_msec(char *filename){ use_boottime = 0; } else { - error("Cannot find any way to read uptime on this system."); + netdata_log_error("Cannot find any way to read uptime on this system."); return 1; } } diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c index adca337614..59a1da7897 100644 --- a/libnetdata/config/appconfig.c +++ b/libnetdata/config/appconfig.c @@ -62,7 +62,7 @@ int is_valid_connector(char *type, int check_reserved) } // else { // if (unlikely(is_valid_connector(type,1))) { -// error("Section %s invalid -- reserved name", type); +// netdata_log_error("Section %s invalid -- reserved name", type); // return 0; // } // } @@ -174,7 +174,7 @@ static inline struct section *appconfig_section_create(struct config *root, cons avl_init_lock(&co->values_index, appconfig_option_compare); if(unlikely(appconfig_index_add(root, co) != co)) - error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name); + netdata_log_error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name); appconfig_wrlock(root); struct section *co2 = root->last_section; @@ -198,7 +198,7 @@ void appconfig_section_destroy_non_loaded(struct config *root, const char *secti co = appconfig_section_find(root, section); if(!co) { - error("Could not destroy section '%s'. Not found.", section); + netdata_log_error("Could not destroy section '%s'. 
Not found.", section); return; } @@ -213,7 +213,7 @@ void appconfig_section_destroy_non_loaded(struct config *root, const char *secti for(cv = co->values ; cv ; cv = cv_next) { cv_next = cv->next; if(unlikely(!appconfig_option_index_del(co, cv))) - error("Cannot remove config option '%s' from section '%s'.", cv->name, co->name); + netdata_log_error("Cannot remove config option '%s' from section '%s'.", cv->name, co->name); freez(cv->value); freez(cv->name); freez(cv); @@ -222,7 +222,7 @@ void appconfig_section_destroy_non_loaded(struct config *root, const char *secti config_section_unlock(co); if (unlikely(!appconfig_index_del(root, co))) { - error("Cannot remove section '%s' from config.", section); + netdata_log_error("Cannot remove section '%s' from config.", section); return; } @@ -264,7 +264,7 @@ void appconfig_section_option_destroy_non_loaded(struct config *root, const char struct section *co; co = appconfig_section_find(root, section); if (!co) { - error("Could not destroy section option '%s -> %s'. The section not found.", section, name); + netdata_log_error("Could not destroy section option '%s -> %s'. The section not found.", section, name); return; } @@ -281,7 +281,7 @@ void appconfig_section_option_destroy_non_loaded(struct config *root, const char if (unlikely(!(cv && appconfig_option_index_del(co, cv)))) { config_section_unlock(co); - error("Could not destroy section option '%s -> %s'. The option not found.", section, name); + netdata_log_error("Could not destroy section option '%s -> %s'. The option not found.", section, name); return; } @@ -319,7 +319,7 @@ static inline struct config_option *appconfig_value_create(struct section *co, c struct config_option *found = appconfig_option_index_add(co, cv); if(found != cv) { - error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name); + netdata_log_error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name); freez(cv->value); freez(cv->name); freez(cv); @@ -375,7 +375,7 @@ int appconfig_move(struct config *root, const char *section_old, const char *nam if(cv_new) goto cleanup; if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old)) - error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", cv_old->name, co_old->name); + netdata_log_error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", cv_old->name, co_old->name); if(co_old->values == cv_old) { co_old->values = cv_old->next; @@ -384,7 +384,7 @@ int appconfig_move(struct config *root, const char *section_old, const char *nam struct config_option *t; for(t = co_old->values; t && t->next != cv_old ;t = t->next) ; if(!t || t->next != cv_old) - error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name); + netdata_log_error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name); else t->next = cv_old->next; } @@ -398,7 +398,7 @@ int appconfig_move(struct config *root, const char *section_old, const char *nam co_new->values = cv_new; if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old)) - error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name); + netdata_log_error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name); 
ret = 0; @@ -618,7 +618,7 @@ int appconfig_get_duration(struct config *root, const char *section, const char if(!s) goto fallback; if(!config_parse_duration(s, &result)) { - error("config option '[%s].%s = %s' is configured with an valid duration", section, name, s); + netdata_log_error("config option '[%s].%s = %s' is configured with an valid duration", section, name, s); goto fallback; } @@ -626,7 +626,7 @@ int appconfig_get_duration(struct config *root, const char *section, const char fallback: if(!config_parse_duration(value, &result)) - error("INTERNAL ERROR: default duration supplied for option '[%s].%s = %s' is not a valid duration", section, name, value); + netdata_log_error("INTERNAL ERROR: default duration supplied for option '[%s].%s = %s' is not a valid duration", section, name, value); return result; } @@ -696,13 +696,13 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons strncpyz(working_instance, s, CONFIG_MAX_NAME); working_connector_section = NULL; if (unlikely(appconfig_section_find(root, working_instance))) { - error("Instance (%s) already exists", working_instance); + netdata_log_error("Instance (%s) already exists", working_instance); co = NULL; continue; } } else { co = NULL; - error("Section (%s) does not specify a valid connector", s); + netdata_log_error("Section (%s) does not specify a valid connector", s); continue; } } @@ -718,7 +718,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons struct config_option *save = cv2->next; struct config_option *found = appconfig_option_index_del(co, cv2); if(found != cv2) - error("INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.", + netdata_log_error("INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.", cv2->name, co->name); freez(cv2->name); @@ -735,7 +735,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons if(!co) { // line outside a section - error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename); + netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename); continue; } @@ -746,7 +746,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons char *name = s; char *value = strchr(s, '='); if(!value) { - error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename); + netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename); continue; } *value = '\0'; @@ -756,7 +756,7 @@ int appconfig_load(struct config *root, char *filename, int overwrite_used, cons value = trim(value); if(!name || *name == '#') { - error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename); + netdata_log_error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename); continue; } diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c index abcc2ccf5e..f18a7f87f9 100644 --- a/libnetdata/dictionary/dictionary.c +++ b/libnetdata/dictionary/dictionary.c @@ -952,7 +952,7 @@ static int item_check_and_acquire_advanced(DICTIONARY *dict, DICTIONARY_ITEM *it if (having_index_lock) { // delete it from the hashtable if(hashtable_delete_unsafe(dict, item_get_name(item), item->key_len, item) == 0) - error("DICTIONARY: INTERNAL ERROR VIEW: tried to delete item with name '%s', name_len %u that is not in the index", item_get_name(item), 
(KEY_LEN_TYPE)(item->key_len - 1)); + netdata_log_error("DICTIONARY: INTERNAL ERROR VIEW: tried to delete item with name '%s', name_len %u that is not in the index", item_get_name(item), (KEY_LEN_TYPE)(item->key_len - 1)); else pointer_del(dict, item); @@ -1065,8 +1065,8 @@ static size_t hashtable_destroy_unsafe(DICTIONARY *dict) { JError_t J_Error; Word_t ret = JudyHSFreeArray(&dict->index.JudyHSArray, &J_Error); if(unlikely(ret == (Word_t) JERR)) { - error("DICTIONARY: Cannot destroy JudyHS, JU_ERRNO_* == %u, ID == %d", - JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); + netdata_log_error("DICTIONARY: Cannot destroy JudyHS, JU_ERRNO_* == %u, ID == %d", + JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); } debug(D_DICTIONARY, "Dictionary: hash table freed %lu bytes", ret); @@ -1079,8 +1079,8 @@ static inline void **hashtable_insert_unsafe(DICTIONARY *dict, const char *name, JError_t J_Error; Pvoid_t *Rc = JudyHSIns(&dict->index.JudyHSArray, (void *)name, name_len, &J_Error); if (unlikely(Rc == PJERR)) { - error("DICTIONARY: Cannot insert entry with name '%s' to JudyHS, JU_ERRNO_* == %u, ID == %d", - name, JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); + netdata_log_error("DICTIONARY: Cannot insert entry with name '%s' to JudyHS, JU_ERRNO_* == %u, ID == %d", + name, JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); } // if *Rc == 0, new item added to the array @@ -1100,8 +1100,9 @@ static inline int hashtable_delete_unsafe(DICTIONARY *dict, const char *name, si JError_t J_Error; int ret = JudyHSDel(&dict->index.JudyHSArray, (void *)name, name_len, &J_Error); if(unlikely(ret == JERR)) { - error("DICTIONARY: Cannot delete entry with name '%s' from JudyHS, JU_ERRNO_* == %u, ID == %d", name, - JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); + netdata_log_error("DICTIONARY: Cannot delete entry with name '%s' from JudyHS, JU_ERRNO_* == %u, ID == %d", + name, + JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); return 0; } @@ -1573,7 +1574,9 @@ static bool dict_item_del(DICTIONARY *dict, const char *name, ssize_t name_len) } else { if(hashtable_delete_unsafe(dict, name, name_len, item) == 0) - error("DICTIONARY: INTERNAL ERROR: tried to delete item with name '%s', name_len %zd that is not in the index", name, name_len - 1); + netdata_log_error("DICTIONARY: INTERNAL ERROR: tried to delete item with name '%s', name_len %zd that is not in the index", + name, + name_len - 1); else pointer_del(dict, item); @@ -1668,7 +1671,7 @@ static DICTIONARY_ITEM *dict_item_add_or_reset_value_and_acquire(DICTIONARY *dic // view dictionary // the item is already there and can be used if(item->shared != master_item->shared) - error("DICTIONARY: changing the master item on a view is not supported. The previous item will remain. To change the key of an item in a view, delete it and add it again."); + netdata_log_error("DICTIONARY: changing the master item on a view is not supported. The previous item will remain. 
To change the key of an item in a view, delete it and add it again."); } else { // master dictionary @@ -2555,8 +2558,8 @@ void thread_cache_destroy(void) { JError_t J_Error; Word_t ret = JudyHSFreeArray(&thread_cache_judy_array, &J_Error); if(unlikely(ret == (Word_t) JERR)) { - error("THREAD_CACHE: Cannot destroy JudyHS, JU_ERRNO_* == %u, ID == %d", - JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); + netdata_log_error("THREAD_CACHE: Cannot destroy JudyHS, JU_ERRNO_* == %u, ID == %d", + JU_ERRNO(&J_Error), JU_ERRID(&J_Error)); } internal_error(true, "THREAD_CACHE: hash table freed %lu bytes", ret); diff --git a/libnetdata/ebpf/ebpf.c b/libnetdata/ebpf/ebpf.c index a66b5998a9..c41a0021ee 100644 --- a/libnetdata/ebpf/ebpf.c +++ b/libnetdata/ebpf/ebpf.c @@ -477,7 +477,7 @@ void ebpf_update_kernel_memory(ebpf_plugin_stats_t *report, ebpf_local_maps_t *m snprintfz(filename, FILENAME_MAX, "/proc/self/fdinfo/%d", map->map_fd); procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); if(unlikely(!ff)) { - error("Cannot open %s", filename); + netdata_log_error("Cannot open %s", filename); return; } @@ -613,7 +613,7 @@ void ebpf_update_map_size(struct bpf_map *map, ebpf_local_maps_t *lmap, ebpf_mod void ebpf_update_map_type(struct bpf_map *map, ebpf_local_maps_t *w) { if (bpf_map__set_type(map, w->map_type)) { - error("Cannot modify map type for %s", w->name); + netdata_log_error("Cannot modify map type for %s", w->name); } } @@ -794,7 +794,7 @@ void ebpf_update_controller(int fd, ebpf_module_t *em) for (key = NETDATA_CONTROLLER_APPS_ENABLED; key < end; key++) { int ret = bpf_map_update_elem(fd, &key, &values[key], 0); if (ret) - error("Add key(%u) for controller table failed.", key); + netdata_log_error("Add key(%u) for controller table failed.", key); } } @@ -867,7 +867,7 @@ struct bpf_link **ebpf_load_program(char *plugins_dir, ebpf_module_t *em, int kv ebpf_update_legacy_map(*obj, em); if (bpf_object__load(*obj)) { - error("ERROR: loading BPF object file failed %s\n", lpath); + netdata_log_error("ERROR: loading BPF object file failed %s\n", lpath); bpf_object__close(*obj); return NULL; } @@ -891,7 +891,7 @@ char *ebpf_find_symbol(char *search) snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, NETDATA_KALLSYMS); procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); if(unlikely(!ff)) { - error("Cannot open %s%s", netdata_configured_host_prefix, NETDATA_KALLSYMS); + netdata_log_error("Cannot open %s%s", netdata_configured_host_prefix, NETDATA_KALLSYMS); return ret; } @@ -1295,7 +1295,7 @@ void ebpf_update_module(ebpf_module_t *em, struct btf *btf_file, int kver, int i if (!ebpf_load_config(em->cfg, filename)) { ebpf_mount_config_name(filename, FILENAME_MAX, ebpf_stock_config_dir, em->config_file); if (!ebpf_load_config(em->cfg, filename)) { - error("Cannot load the ebpf configuration file %s", em->config_file); + netdata_log_error("Cannot load the ebpf configuration file %s", em->config_file); return; } // If user defined data globally, we will have here EBPF_LOADED_FROM_USER, we need to consider this, to avoid @@ -1512,7 +1512,7 @@ int ebpf_is_tracepoint_enabled(char *subsys, char *eventname) static int ebpf_change_tracing_values(char *subsys, char *eventname, char *value) { if (strcmp("0", value) && strcmp("1", value)) { - error("Invalid value given to either enable or disable a tracepoint."); + netdata_log_error("Invalid value given to either enable or disable a tracepoint."); return -1; } diff --git a/libnetdata/eval/eval.c b/libnetdata/eval/eval.c 
index 6bf206febe..365f4ab523 100644 --- a/libnetdata/eval/eval.c +++ b/libnetdata/eval/eval.c @@ -1129,14 +1129,14 @@ EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, in if(!op) { unsigned long pos = s - string + 1; - error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s); + netdata_log_error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s); return NULL; } BUFFER *out = buffer_create(1024, NULL); print_parsed_as_node(out, op, &err); if(err != EVAL_ERROR_OK) { - error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err)); + netdata_log_error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err)); eval_node_free(op); buffer_free(out); return NULL; diff --git a/libnetdata/health/health.c b/libnetdata/health/health.c index d5403cefa4..505df4e5ab 100644 --- a/libnetdata/health/health.c +++ b/libnetdata/health/health.c @@ -73,7 +73,7 @@ SILENCER *health_silencers_addparam(SILENCER *silencer, char *key, char *value) ) { silencer = create_silencer(); if(!silencer) { - error("Cannot add a new silencer to Netdata"); + netdata_log_error("Cannot add a new silencer to Netdata"); return NULL; } } diff --git a/libnetdata/json/json.c b/libnetdata/json/json.c index a4c4fc3047..ec1452911a 100644 --- a/libnetdata/json/json.c +++ b/libnetdata/json/json.c @@ -22,13 +22,13 @@ int json_tokens = JSON_TOKENS; #ifdef ENABLE_JSONC json_object *json_tokenise(char *js) { if(!js) { - error("JSON: json string is empty."); + netdata_log_error("JSON: json string is empty."); return NULL; } json_object *token = json_tokener_parse(js); if(!token) { - error("JSON: Invalid json string."); + netdata_log_error("JSON: Invalid json string."); return NULL; } @@ -39,7 +39,7 @@ jsmntok_t *json_tokenise(char *js, size_t len, size_t *count) { int n = json_tokens; if(!js || !len) { - error("JSON: json string is empty."); + netdata_log_error("JSON: json string is empty."); return NULL; } @@ -62,12 +62,12 @@ jsmntok_t *json_tokenise(char *js, size_t len, size_t *count) } if (ret == JSMN_ERROR_INVAL) { - error("JSON: Invalid json string."); + netdata_log_error("JSON: Invalid json string."); freez(tokens); return NULL; } else if (ret == JSMN_ERROR_PART) { - error("JSON: Truncated JSON string."); + netdata_log_error("JSON: Truncated JSON string."); freez(tokens); return NULL; } diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c index 84878a118c..0174d4d3d7 100644 --- a/libnetdata/libnetdata.c +++ b/libnetdata/libnetdata.c @@ -370,7 +370,7 @@ static struct malloc_header *malloc_get_header(void *ptr, const char *caller, co struct malloc_header *t = (struct malloc_header *)ret; if(t->signature.magic != 0x0BADCAFE) { - error("pointer %p is not our pointer (called %s() from %zu@%s, %s()).", ptr, caller, line, file, function); + netdata_log_error("pointer %p is not our pointer (called %s() from %zu@%s, %s()).", ptr, caller, line, file, function); return NULL; } @@ -1050,13 +1050,16 @@ static int memory_file_open(const char *filename, size_t size) { if (lseek(fd, size, SEEK_SET) == (off_t) size) { if (write(fd, "", 1) == 1) { if (ftruncate(fd, size)) - error("Cannot truncate file '%s' to size %zu. Will use the larger file.", filename, size); + netdata_log_error("Cannot truncate file '%s' to size %zu. 
Will use the larger file.", filename, size); } - else error("Cannot write to file '%s' at position %zu.", filename, size); + else + netdata_log_error("Cannot write to file '%s' at position %zu.", filename, size); } - else error("Cannot seek file '%s' to size %zu.", filename, size); + else + netdata_log_error("Cannot seek file '%s' to size %zu.", filename, size); } - else error("Cannot create/open file '%s'.", filename); + else + netdata_log_error("Cannot create/open file '%s'.", filename); return fd; } @@ -1065,7 +1068,8 @@ inline int madvise_sequential(void *mem, size_t len) { static int logger = 1; int ret = madvise(mem, len, MADV_SEQUENTIAL); - if (ret != 0 && logger-- > 0) error("madvise(MADV_SEQUENTIAL) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_SEQUENTIAL) failed."); return ret; } @@ -1073,7 +1077,8 @@ inline int madvise_random(void *mem, size_t len) { static int logger = 1; int ret = madvise(mem, len, MADV_RANDOM); - if (ret != 0 && logger-- > 0) error("madvise(MADV_RANDOM) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_RANDOM) failed."); return ret; } @@ -1081,7 +1086,8 @@ inline int madvise_dontfork(void *mem, size_t len) { static int logger = 1; int ret = madvise(mem, len, MADV_DONTFORK); - if (ret != 0 && logger-- > 0) error("madvise(MADV_DONTFORK) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_DONTFORK) failed."); return ret; } @@ -1089,7 +1095,8 @@ inline int madvise_willneed(void *mem, size_t len) { static int logger = 1; int ret = madvise(mem, len, MADV_WILLNEED); - if (ret != 0 && logger-- > 0) error("madvise(MADV_WILLNEED) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_WILLNEED) failed."); return ret; } @@ -1097,7 +1104,8 @@ inline int madvise_dontneed(void *mem, size_t len) { static int logger = 1; int ret = madvise(mem, len, MADV_DONTNEED); - if (ret != 0 && logger-- > 0) error("madvise(MADV_DONTNEED) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_DONTNEED) failed."); return ret; } @@ -1106,7 +1114,8 @@ inline int madvise_dontdump(void *mem __maybe_unused, size_t len __maybe_unused) static int logger = 1; int ret = madvise(mem, len, MADV_DONTDUMP); - if (ret != 0 && logger-- > 0) error("madvise(MADV_DONTDUMP) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_DONTDUMP) failed."); return ret; #else return 0; @@ -1118,7 +1127,8 @@ inline int madvise_mergeable(void *mem __maybe_unused, size_t len __maybe_unused static int logger = 1; int ret = madvise(mem, len, MADV_MERGEABLE); - if (ret != 0 && logger-- > 0) error("madvise(MADV_MERGEABLE) failed."); + if (ret != 0 && logger-- > 0) + netdata_log_error("madvise(MADV_MERGEABLE) failed."); return ret; #else return 0; @@ -1215,12 +1225,12 @@ int memory_file_save(const char *filename, void *mem, size_t size) { int fd = open(tmpfilename, O_RDWR | O_CREAT | O_NOATIME, 0664); if (fd < 0) { - error("Cannot create/open file '%s'.", filename); + netdata_log_error("Cannot create/open file '%s'.", filename); return -1; } if (write(fd, mem, size) != (ssize_t) size) { - error("Cannot write to file '%s' %ld bytes.", filename, (long) size); + netdata_log_error("Cannot write to file '%s' %ld bytes.", filename, (long) size); close(fd); return -1; } @@ -1228,7 +1238,7 @@ int memory_file_save(const char *filename, void *mem, size_t size) { close(fd); if (rename(tmpfilename, filename)) { - error("Cannot rename '%s' to '%s'", tmpfilename, filename); + 
netdata_log_error("Cannot rename '%s' to '%s'", tmpfilename, filename); return -1; } @@ -1298,7 +1308,7 @@ unsigned long end_tsc(void) { int recursively_delete_dir(const char *path, const char *reason) { DIR *dir = opendir(path); if(!dir) { - error("Cannot read %s directory to be deleted '%s'", reason?reason:"", path); + netdata_log_error("Cannot read %s directory to be deleted '%s'", reason?reason:"", path); return -1; } @@ -1323,14 +1333,14 @@ int recursively_delete_dir(const char *path, const char *reason) { netdata_log_info("Deleting %s file '%s'", reason?reason:"", fullpath); if(unlikely(unlink(fullpath) == -1)) - error("Cannot delete %s file '%s'", reason?reason:"", fullpath); + netdata_log_error("Cannot delete %s file '%s'", reason?reason:"", fullpath); else ret++; } netdata_log_info("Deleting empty directory '%s'", path); if(unlikely(rmdir(path) == -1)) - error("Cannot delete empty directory '%s'", path); + netdata_log_error("Cannot delete empty directory '%s'", path); else ret++; @@ -1404,7 +1414,7 @@ int verify_netdata_host_prefix() { return 0; failed: - error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason); + netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason); netdata_configured_host_prefix = ""; return -1; } @@ -1512,7 +1522,7 @@ int path_is_file(const char *path, const char *subpath) { void recursive_config_double_dir_load(const char *user_path, const char *stock_path, const char *subpath, int (*callback)(const char *filename, void *data), void *data, size_t depth) { if(depth > 3) { - error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, subpath); + netdata_log_error("CONFIG: Max directory depth reached while reading user path '%s', stock path '%s', subpath '%s'", user_path, stock_path, subpath); return; } @@ -1523,7 +1533,7 @@ void recursive_config_double_dir_load(const char *user_path, const char *stock_p DIR *dir = opendir(udir); if (!dir) { - error("CONFIG cannot open user-config directory '%s'.", udir); + netdata_log_error("CONFIG cannot open user-config directory '%s'.", udir); } else { struct dirent *de = NULL; @@ -1565,7 +1575,7 @@ void recursive_config_double_dir_load(const char *user_path, const char *stock_p dir = opendir(sdir); if (!dir) { - error("CONFIG cannot open stock config directory '%s'.", sdir); + netdata_log_error("CONFIG cannot open stock config directory '%s'.", sdir); } else { if (strcmp(udir, sdir)) { @@ -1741,7 +1751,7 @@ bool run_command_and_copy_output_to_stdout(const char *command, int max_line_len fprintf(stdout, "%s", buffer); } else { - error("Failed to execute command '%s'.", command); + netdata_log_error("Failed to execute command '%s'.", command); return false; } @@ -1759,7 +1769,7 @@ void for_each_open_fd(OPEN_FD_ACTION action, OPEN_FD_EXCLUDE excluded_fds){ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDERR)) (void)close(STDERR_FILENO); #if defined(HAVE_CLOSE_RANGE) if(close_range(STDERR_FILENO + 1, ~0U, 0) == 0) return; - error("close_range() failed, will try to close fds one by one"); + netdata_log_error("close_range() failed, will try to close fds one by one"); #endif break; case OPEN_FD_ACTION_FD_CLOEXEC: @@ -1768,7 +1778,7 @@ void for_each_open_fd(OPEN_FD_ACTION action, OPEN_FD_EXCLUDE excluded_fds){ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDERR)) (void)fcntl(STDERR_FILENO, F_SETFD, FD_CLOEXEC); #if defined(HAVE_CLOSE_RANGE) && defined(CLOSE_RANGE_CLOEXEC) // Linux >= 5.11, 
FreeBSD >= 13.1 if(close_range(STDERR_FILENO + 1, ~0U, CLOSE_RANGE_CLOEXEC) == 0) return; - error("close_range() failed, will try to mark fds for closing one by one"); + netdata_log_error("close_range() failed, will try to mark fds for closing one by one"); #endif break; default: diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h index 8d87745f22..4449ec4bca 100644 --- a/libnetdata/libnetdata.h +++ b/libnetdata/libnetdata.h @@ -601,7 +601,7 @@ char *find_and_replace(const char *src, const char *find, const char *replace, c #define UNUSED_FUNCTION(x) UNUSED_##x #endif -#define error_report(x, args...) do { errno = 0; error(x, ##args); } while(0) +#define error_report(x, args...) do { errno = 0; netdata_log_error(x, ##args); } while(0) // Taken from linux kernel #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c index 0c02152f79..7c806688d9 100644 --- a/libnetdata/locks/locks.c +++ b/libnetdata/locks/locks.c @@ -33,7 +33,7 @@ inline void netdata_thread_disable_cancelability(void) { int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old); if(ret != 0) - error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", + netdata_log_error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret); netdata_thread_first_cancelability = old; @@ -46,9 +46,9 @@ inline void netdata_thread_enable_cancelability(void) { if(unlikely(netdata_thread_nested_disables < 1)) { internal_fatal(true, "THREAD_CANCELABILITY: trying to enable cancelability, but it was not not disabled"); - error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d " - "on thread %s - results will be undefined - please report this!", - netdata_thread_nested_disables, netdata_thread_tag()); + netdata_log_error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d " + "on thread %s - results will be undefined - please report this!", + netdata_thread_nested_disables, netdata_thread_tag()); netdata_thread_nested_disables = 1; } @@ -57,15 +57,18 @@ inline void netdata_thread_enable_cancelability(void) { int old = 1; int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old); if(ret != 0) - error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret); + netdata_log_error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", + netdata_thread_tag(), + ret); else { if(old != PTHREAD_CANCEL_DISABLE) { internal_fatal(true, "THREAD_CANCELABILITY: invalid old state cancelability"); - error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability " - "on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", - netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, - (old == PTHREAD_CANCEL_ENABLE) ? "ENABLED" : "UNKNOWN", old); + netdata_log_error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability " + "on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", + netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, + (old == PTHREAD_CANCEL_ENABLE) ? 
"ENABLED" : "UNKNOWN", + old); } } } @@ -79,14 +82,14 @@ inline void netdata_thread_enable_cancelability(void) { int __netdata_mutex_init(netdata_mutex_t *mutex) { int ret = pthread_mutex_init(mutex, NULL); if(unlikely(ret != 0)) - error("MUTEX_LOCK: failed to initialize (code %d).", ret); + netdata_log_error("MUTEX_LOCK: failed to initialize (code %d).", ret); return ret; } int __netdata_mutex_destroy(netdata_mutex_t *mutex) { int ret = pthread_mutex_destroy(mutex); if(unlikely(ret != 0)) - error("MUTEX_LOCK: failed to destroy (code %d).", ret); + netdata_log_error("MUTEX_LOCK: failed to destroy (code %d).", ret); return ret; } @@ -96,7 +99,7 @@ int __netdata_mutex_lock(netdata_mutex_t *mutex) { int ret = pthread_mutex_lock(mutex); if(unlikely(ret != 0)) { netdata_thread_enable_cancelability(); - error("MUTEX_LOCK: failed to get lock (code %d)", ret); + netdata_log_error("MUTEX_LOCK: failed to get lock (code %d)", ret); } else netdata_locks_acquired_mutexes++; @@ -119,7 +122,7 @@ int __netdata_mutex_trylock(netdata_mutex_t *mutex) { int __netdata_mutex_unlock(netdata_mutex_t *mutex) { int ret = pthread_mutex_unlock(mutex); if(unlikely(ret != 0)) - error("MUTEX_LOCK: failed to unlock (code %d).", ret); + netdata_log_error("MUTEX_LOCK: failed to unlock (code %d).", ret); else { netdata_locks_acquired_mutexes--; netdata_thread_enable_cancelability(); @@ -211,14 +214,14 @@ int netdata_mutex_unlock_debug(const char *file __maybe_unused, const char *func int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_destroy(&rwlock->rwlock_t); if(unlikely(ret != 0)) - error("RW_LOCK: failed to destroy lock (code %d)", ret); + netdata_log_error("RW_LOCK: failed to destroy lock (code %d)", ret); return ret; } int __netdata_rwlock_init(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_init(&rwlock->rwlock_t, NULL); if(unlikely(ret != 0)) - error("RW_LOCK: failed to initialize lock (code %d)", ret); + netdata_log_error("RW_LOCK: failed to initialize lock (code %d)", ret); return ret; } @@ -228,7 +231,7 @@ int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_rdlock(&rwlock->rwlock_t); if(unlikely(ret != 0)) { netdata_thread_enable_cancelability(); - error("RW_LOCK: failed to obtain read lock (code %d)", ret); + netdata_log_error("RW_LOCK: failed to obtain read lock (code %d)", ret); } else netdata_locks_acquired_rwlocks++; @@ -241,7 +244,7 @@ int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_wrlock(&rwlock->rwlock_t); if(unlikely(ret != 0)) { - error("RW_LOCK: failed to obtain write lock (code %d)", ret); + netdata_log_error("RW_LOCK: failed to obtain write lock (code %d)", ret); netdata_thread_enable_cancelability(); } else @@ -253,7 +256,7 @@ int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) { int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_unlock(&rwlock->rwlock_t); if(unlikely(ret != 0)) - error("RW_LOCK: failed to release lock (code %d)", ret); + netdata_log_error("RW_LOCK: failed to release lock (code %d)", ret); else { netdata_thread_enable_cancelability(); netdata_locks_acquired_rwlocks--; diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c index 02b9edc8f5..54543f6f23 100644 --- a/libnetdata/log/log.c +++ b/libnetdata/log/log.c @@ -530,7 +530,7 @@ static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_ else { f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664); if(f == -1) { - error("Cannot open file '%s'. 
Leaving %d to its default.", filename, fd); + netdata_log_error("Cannot open file '%s'. Leaving %d to its default.", filename, fd); if(fd_ptr) *fd_ptr = fd; return fp; } @@ -550,7 +550,7 @@ static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_ // it automatically closes int t = dup2(f, fd); if (t == -1) { - error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename); + netdata_log_error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename); close(f); if(fd_ptr) *fd_ptr = fd; return fp; @@ -563,10 +563,10 @@ static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_ if(!fp) { fp = fdopen(fd, "a"); if (!fp) - error("Cannot fdopen() fd %d ('%s')", fd, filename); + netdata_log_error("Cannot fdopen() fd %d ('%s')", fd, filename); else { if (setvbuf(fp, NULL, _IOLBF, 0) != 0) - error("Cannot set line buffering on fd %d ('%s')", fd, filename); + netdata_log_error("Cannot set line buffering on fd %d ('%s')", fd, filename); } } diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h index 8c14650406..53de391d54 100644 --- a/libnetdata/log/log.h +++ b/libnetdata/log/log.h @@ -121,7 +121,7 @@ typedef struct error_with_limit { #define netdata_log_info(args...) info_int(0, __FILE__, __FUNCTION__, __LINE__, ##args) #define collector_info(args...) info_int(1, __FILE__, __FUNCTION__, __LINE__, ##args) #define infoerr(args...) error_int(0, "INFO", __FILE__, __FUNCTION__, __LINE__, ##args) -#define error(args...) error_int(0, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args) +#define netdata_log_error(args...) error_int(0, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args) #define collector_infoerr(args...) error_int(1, "INFO", __FILE__, __FUNCTION__, __LINE__, ##args) #define collector_error(args...) error_int(1, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args) #define error_limit(erl, args...) error_limit_int(erl, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args) diff --git a/libnetdata/onewayalloc/onewayalloc.c b/libnetdata/onewayalloc/onewayalloc.c index 9c6a916803..05c9f2a9dd 100644 --- a/libnetdata/onewayalloc/onewayalloc.c +++ b/libnetdata/onewayalloc/onewayalloc.c @@ -176,7 +176,7 @@ void onewayalloc_freez(ONEWAYALLOC *owa __maybe_unused, const void *ptr __maybe_ // not found - it is not ours // let's free it with the system allocator - error("ONEWAYALLOC: request to free address 0x%p that is not allocated by this OWA", ptr); + netdata_log_error("ONEWAYALLOC: request to free address 0x%p that is not allocated by this OWA", ptr); #endif return; diff --git a/libnetdata/os.c b/libnetdata/os.c index 133c02248d..d4d9e201e6 100644 --- a/libnetdata/os.c +++ b/libnetdata/os.c @@ -36,7 +36,7 @@ long get_system_cpus_with_cache(bool cache, bool for_netdata) { processors[index] = 1; if(error) - error("Assuming system has %d processors.", processors[index]); + netdata_log_error("Assuming system has %d processors.", processors[index]); } return processors[index]; @@ -49,14 +49,14 @@ long get_system_cpus_with_cache(bool cache, bool for_netdata) { procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT); if(!ff) { processors[index] = 1; - error("Cannot open file '%s'. Assuming system has %ld processors.", filename, processors[index]); + netdata_log_error("Cannot open file '%s'.
Assuming system has %ld processors.", filename, processors[index]); return processors[index]; } ff = procfile_readall(ff); if(!ff) { processors[index] = 1; - error("Cannot open file '%s'. Assuming system has %ld processors.", filename, processors[index]); + netdata_log_error("Cannot open file '%s'. Assuming system has %ld processors.", filename, processors[index]); return processors[index]; } @@ -93,7 +93,7 @@ pid_t get_system_pid_max(void) { if (unlikely(GETSYSCTL_BY_NAME("kern.pid_max", tmp_pid_max))) { pid_max = 99999; - error("Assuming system's maximum pid is %d.", pid_max); + netdata_log_error("Assuming system's maximum pid is %d.", pid_max); } else { pid_max = tmp_pid_max; } @@ -110,12 +110,12 @@ pid_t get_system_pid_max(void) { unsigned long long max = 0; if(read_single_number_file(filename, &max) != 0) { - error("Cannot open file '%s'. Assuming system supports %d pids.", filename, pid_max); + netdata_log_error("Cannot open file '%s'. Assuming system supports %d pids.", filename, pid_max); return pid_max; } if(!max) { - error("Cannot parse file '%s'. Assuming system supports %d pids.", filename, pid_max); + netdata_log_error("Cannot parse file '%s'. Assuming system supports %d pids.", filename, pid_max); return pid_max; } @@ -130,7 +130,7 @@ void get_system_HZ(void) { long ticks; if ((ticks = sysconf(_SC_CLK_TCK)) == -1) { - error("Cannot get system clock ticks"); + netdata_log_error("Cannot get system clock ticks"); } system_hz = (unsigned int) ticks; @@ -197,11 +197,11 @@ int getsysctl_by_name(const char *name, void *ptr, size_t len) { size_t nlen = len; if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) { - error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + netdata_log_error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); return 1; } if (unlikely(nlen != len)) { - error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + netdata_log_error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); return 1; } return 0; @@ -215,11 +215,11 @@ int getsysctl_simple(const char *name, int *mib, size_t miblen, void *ptr, size_ return 1; if (unlikely(sysctl(mib, miblen, ptr, &nlen, NULL, 0) == -1)) { - error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + netdata_log_error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); return 1; } if (unlikely(nlen != len)) { - error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + netdata_log_error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); return 1; } @@ -234,11 +234,11 @@ int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len) return 1; if (unlikely(sysctl(mib, miblen, ptr, len, NULL, 0) == -1)) { - error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); + netdata_log_error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); return 1; } if (unlikely(ptr != NULL && nlen != *len)) { - error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)*len, (unsigned long)nlen); + netdata_log_error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)*len, (unsigned long)nlen); return 1; } @@ -249,11 +249,11 @@ int getsysctl_mib(const char *name, int *mib, size_t len) { size_t nlen = len; if (unlikely(sysctlnametomib(name, mib, &nlen) == -1)) { - error("FREEBSD: sysctl(%s...) 
failed: %s", name, strerror(errno)); + netdata_log_error("FREEBSD: sysctl(%s...) failed: %s", name, strerror(errno)); return 1; } if (unlikely(nlen != len)) { - error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + netdata_log_error("FREEBSD: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); return 1; } return 0; @@ -274,11 +274,11 @@ int getsysctl_by_name(const char *name, void *ptr, size_t len) { size_t nlen = len; if (unlikely(sysctlbyname(name, ptr, &nlen, NULL, 0) == -1)) { - error("MACOS: sysctl(%s...) failed: %s", name, strerror(errno)); + netdata_log_error("MACOS: sysctl(%s...) failed: %s", name, strerror(errno)); return 1; } if (unlikely(nlen != len)) { - error("MACOS: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); + netdata_log_error("MACOS: sysctl(%s...) expected %lu, got %lu", name, (unsigned long)len, (unsigned long)nlen); return 1; } return 0; diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c index d21b481ae2..48e01056f2 100644 --- a/libnetdata/popen/popen.c +++ b/libnetdata/popen/popen.c @@ -55,7 +55,7 @@ static void netdata_popen_tracking_del_pid(pid_t pid) { freez(mp); } else - error("POPEN: Cannot find pid %d.", pid); + netdata_log_error("POPEN: Cannot find pid %d.", pid); netdata_popen_tracking_unlock(); } @@ -156,33 +156,33 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI unsigned int fds_to_exclude_from_closing = OPEN_FD_EXCLUDE_STDERR; if(posix_spawn_file_actions_init(&fa)) { - error("POPEN: posix_spawn_file_actions_init() failed."); + netdata_log_error("POPEN: posix_spawn_file_actions_init() failed."); ret = -1; goto set_return_values_and_return; } if(fpp_child_stdin) { if (pipe(pipefd_stdin) == -1) { - error("POPEN: stdin pipe() failed"); + netdata_log_error("POPEN: stdin pipe() failed"); ret = -1; goto cleanup_and_return; } if ((fp_child_stdin = fdopen(pipefd_stdin[PIPE_WRITE], "w")) == NULL) { - error("POPEN: fdopen() stdin failed"); + netdata_log_error("POPEN: fdopen() stdin failed"); ret = -1; goto cleanup_and_return; } if(posix_spawn_file_actions_adddup2(&fa, pipefd_stdin[PIPE_READ], STDIN_FILENO)) { - error("POPEN: posix_spawn_file_actions_adddup2() on stdin failed."); + netdata_log_error("POPEN: posix_spawn_file_actions_adddup2() on stdin failed."); ret = -1; goto cleanup_and_return; } } else { if (posix_spawn_file_actions_addopen(&fa, STDIN_FILENO, "/dev/null", O_RDONLY, 0)) { - error("POPEN: posix_spawn_file_actions_addopen() on stdin to /dev/null failed."); + netdata_log_error("POPEN: posix_spawn_file_actions_addopen() on stdin to /dev/null failed."); // this is not a fatal error fds_to_exclude_from_closing |= OPEN_FD_EXCLUDE_STDIN; } @@ -190,26 +190,26 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI if (fpp_child_stdout) { if (pipe(pipefd_stdout) == -1) { - error("POPEN: stdout pipe() failed"); + netdata_log_error("POPEN: stdout pipe() failed"); ret = -1; goto cleanup_and_return; } if ((fp_child_stdout = fdopen(pipefd_stdout[PIPE_READ], "r")) == NULL) { - error("POPEN: fdopen() stdout failed"); + netdata_log_error("POPEN: fdopen() stdout failed"); ret = -1; goto cleanup_and_return; } if(posix_spawn_file_actions_adddup2(&fa, pipefd_stdout[PIPE_WRITE], STDOUT_FILENO)) { - error("POPEN: posix_spawn_file_actions_adddup2() on stdout failed."); + netdata_log_error("POPEN: posix_spawn_file_actions_adddup2() on stdout failed."); ret = -1; goto 
cleanup_and_return; } } else { if (posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO, "/dev/null", O_WRONLY, 0)) { - error("POPEN: posix_spawn_file_actions_addopen() on stdout to /dev/null failed."); + netdata_log_error("POPEN: posix_spawn_file_actions_addopen() on stdout to /dev/null failed."); // this is not a fatal error fds_to_exclude_from_closing |= OPEN_FD_EXCLUDE_STDOUT; } @@ -223,20 +223,20 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI attr_rc = posix_spawnattr_init(&attr); if(attr_rc) { // failed - error("POPEN: posix_spawnattr_init() failed."); + netdata_log_error("POPEN: posix_spawnattr_init() failed."); } else { // success // reset all signals in the child if (posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETSIGDEF)) - error("POPEN: posix_spawnattr_setflags() failed."); + netdata_log_error("POPEN: posix_spawnattr_setflags() failed."); sigset_t mask; sigemptyset(&mask); if (posix_spawnattr_setsigmask(&attr, &mask)) - error("POPEN: posix_spawnattr_setsigmask() failed."); + netdata_log_error("POPEN: posix_spawnattr_setsigmask() failed."); } // Take the lock while we fork to ensure we don't race with SIGCHLD @@ -251,7 +251,7 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI else { // failure netdata_popen_tracking_unlock(); - error("POPEN: failed to spawn command: \"%s\" from parent pid %d.", command_to_be_logged, getpid()); + netdata_log_error("POPEN: failed to spawn command: \"%s\" from parent pid %d.", command_to_be_logged, getpid()); ret = -1; goto cleanup_and_return; } @@ -263,11 +263,11 @@ cleanup_and_return: if(!attr_rc) { // posix_spawnattr_init() succeeded if (posix_spawnattr_destroy(&attr)) - error("POPEN: posix_spawnattr_destroy() failed"); + netdata_log_error("POPEN: posix_spawnattr_destroy() failed"); } if (posix_spawn_file_actions_destroy(&fa)) - error("POPEN: posix_spawn_file_actions_destroy() failed"); + netdata_log_error("POPEN: posix_spawn_file_actions_destroy() failed"); // the child end - close it if(pipefd_stdin[PIPE_READ] != -1) @@ -401,7 +401,7 @@ int netdata_pclose(FILE *fp_child_input, FILE *fp_child_output, pid_t pid) { switch (info.si_code) { case CLD_EXITED: if(info.si_status) - error("child pid %d exited with code %d.", info.si_pid, info.si_status); + netdata_log_error("child pid %d exited with code %d.", info.si_pid, info.si_status); return(info.si_status); case CLD_KILLED: @@ -414,33 +414,33 @@ int netdata_pclose(FILE *fp_child_input, FILE *fp_child_output, pid_t pid) { return(0); } else { - error("child pid %d killed by signal %d.", info.si_pid, info.si_status); + netdata_log_error("child pid %d killed by signal %d.", info.si_pid, info.si_status); return(-1); } case CLD_DUMPED: - error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status); + netdata_log_error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status); return(-2); case CLD_STOPPED: - error("child pid %d stopped by signal %d.", info.si_pid, info.si_status); + netdata_log_error("child pid %d stopped by signal %d.", info.si_pid, info.si_status); return(0); case CLD_TRAPPED: - error("child pid %d trapped by signal %d.", info.si_pid, info.si_status); + netdata_log_error("child pid %d trapped by signal %d.", info.si_pid, info.si_status); return(-4); case CLD_CONTINUED: - error("child pid %d continued by signal %d.", info.si_pid, info.si_status); + netdata_log_error("child pid %d continued by signal %d.", info.si_pid, info.si_status); return(0); default: - 
error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status); + netdata_log_error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status); return(-5); } } else - error("Cannot waitid() for pid %d", pid); + netdata_log_error("Cannot waitid() for pid %d", pid); return 0; } diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c index a73a9e2533..dd572e0e7e 100644 --- a/libnetdata/procfile/procfile.c +++ b/libnetdata/procfile/procfile.c @@ -297,7 +297,8 @@ procfile *procfile_readall(procfile *ff) { r = read(ff->fd, &ff->data[s], ff->size - s); if(unlikely(r == -1)) { if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); - else if(unlikely(ff->flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); + else if(unlikely(ff->flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) + netdata_log_error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); procfile_close(ff); return NULL; } @@ -308,7 +309,8 @@ procfile *procfile_readall(procfile *ff) { // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename); if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) { if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); - else if(unlikely(ff->flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); + else if(unlikely(ff->flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) + netdata_log_error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); procfile_close(ff); return NULL; } @@ -406,7 +408,8 @@ procfile *procfile_open(const char *filename, const char *separators, uint32_t f int fd = open(filename, procfile_open_flags, 0666); if(unlikely(fd == -1)) { if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot open file '%s'", filename); - else if(unlikely(flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) error(PF_PREFIX ": Cannot open file '%s'", filename); + else if(unlikely(flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) + netdata_log_error(PF_PREFIX ": Cannot open file '%s'", filename); return NULL; } diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c index 72eb939210..56cb29330a 100644 --- a/libnetdata/socket/security.c +++ b/libnetdata/socket/security.c @@ -429,7 +429,7 @@ void netdata_ssl_initialize_openssl() { #else if (OPENSSL_init_ssl(OPENSSL_INIT_LOAD_CONFIG, NULL) != 1) { - error("SSL library cannot be initialized."); + netdata_log_error("SSL library cannot be initialized."); } #endif @@ -516,7 +516,7 @@ static SSL_CTX * netdata_ssl_create_server_ctx(unsigned long mode) { #if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110 ctx = SSL_CTX_new(SSLv23_server_method()); if (!ctx) { - error("Cannot create a new SSL context, netdata won't encrypt communication"); + netdata_log_error("Cannot create a new SSL context, netdata won't encrypt communication"); return NULL; } @@ -524,7 +524,7 @@ static SSL_CTX * netdata_ssl_create_server_ctx(unsigned long mode) { #else ctx = SSL_CTX_new(TLS_server_method()); if (!ctx) { - error("Cannot create a new SSL context, netdata won't encrypt communication"); + netdata_log_error("Cannot create a new SSL context, netdata won't encrypt communication"); return NULL; 
} @@ -539,7 +539,7 @@ static SSL_CTX * netdata_ssl_create_server_ctx(unsigned long mode) { if(tls_ciphers && strcmp(tls_ciphers, "none") != 0) { if (!SSL_CTX_set_cipher_list(ctx, tls_ciphers)) { - error("SSL error. cannot set the cipher list"); + netdata_log_error("SSL error. cannot set the cipher list"); } } #endif @@ -548,7 +548,7 @@ static SSL_CTX * netdata_ssl_create_server_ctx(unsigned long mode) { if (!SSL_CTX_check_private_key(ctx)) { ERR_error_string_n(ERR_get_error(),lerror,sizeof(lerror)); - error("SSL cannot check the private key: %s",lerror); + netdata_log_error("SSL cannot check the private key: %s",lerror); SSL_CTX_free(ctx); return NULL; } @@ -680,7 +680,7 @@ int security_test_certificate(SSL *ssl) { { char error[512]; ERR_error_string_n(ERR_get_error(), error, sizeof(error)); - error("SSL RFC4158 check: We have a invalid certificate, the tests result with %ld and message %s", status, error); + netdata_log_error("SSL RFC4158 check: We have a invalid certificate, the tests result with %ld and message %s", status, error); ret = -1; } else { ret = 0; diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c index 22e4e0a221..5bcb06834d 100644 --- a/libnetdata/socket/socket.c +++ b/libnetdata/socket/socket.c @@ -124,7 +124,7 @@ int sock_setnonblock(int fd) { int ret = fcntl(fd, F_SETFL, flags); if(ret < 0) - error("Failed to set O_NONBLOCK on socket %d", fd); + netdata_log_error("Failed to set O_NONBLOCK on socket %d", fd); return ret; } @@ -137,7 +137,7 @@ int sock_delnonblock(int fd) { int ret = fcntl(fd, F_SETFL, flags); if(ret < 0) - error("Failed to remove O_NONBLOCK on socket %d", fd); + netdata_log_error("Failed to remove O_NONBLOCK on socket %d", fd); return ret; } @@ -146,7 +146,7 @@ int sock_setreuse(int fd, int reuse) { int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse)); if(ret == -1) - error("Failed to set SO_REUSEADDR on socket %d", fd); + netdata_log_error("Failed to set SO_REUSEADDR on socket %d", fd); return ret; } @@ -157,7 +157,7 @@ int sock_setreuse_port(int fd, int reuse) { #ifdef SO_REUSEPORT ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse)); if(ret == -1 && errno != ENOPROTOOPT) - error("failed to set SO_REUSEPORT on socket %d", fd); + netdata_log_error("failed to set SO_REUSEPORT on socket %d", fd); #else ret = -1; #endif @@ -171,7 +171,7 @@ int sock_enlarge_in(int fd) { ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs)); if(ret == -1) - error("Failed to set SO_RCVBUF on socket %d", fd); + netdata_log_error("Failed to set SO_RCVBUF on socket %d", fd); return ret; } @@ -181,7 +181,7 @@ int sock_enlarge_out(int fd) { ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs)); if(ret == -1) - error("Failed to set SO_SNDBUF on socket %d", fd); + netdata_log_error("Failed to set SO_SNDBUF on socket %d", fd); return ret; } @@ -220,7 +220,7 @@ int create_listen_socket_unix(const char *path, int listen_backlog) { sock = socket(AF_UNIX, SOCK_STREAM, 0); if(sock < 0) { - error("LISTENER: UNIX socket() on path '%s' failed.", path); + netdata_log_error("LISTENER: UNIX socket() on path '%s' failed.", path); return -1; } @@ -234,22 +234,22 @@ int create_listen_socket_unix(const char *path, int listen_backlog) { errno = 0; if (unlink(path) == -1 && errno != ENOENT) - error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path); + netdata_log_error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", 
path); if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { close(sock); - error("LISTENER: UNIX bind() on path '%s' failed.", path); + netdata_log_error("LISTENER: UNIX bind() on path '%s' failed.", path); return -1; } // we have to chmod this to 0777 so that the client will be able // to read from and write to this socket. if(chmod(path, 0777) == -1) - error("LISTENER: failed to chmod() socket file '%s'.", path); + netdata_log_error("LISTENER: failed to chmod() socket file '%s'.", path); if(listen(sock, listen_backlog) < 0) { close(sock); - error("LISTENER: UNIX listen() on path '%s' failed.", path); + netdata_log_error("LISTENER: UNIX listen() on path '%s' failed.", path); return -1; } @@ -264,7 +264,7 @@ int create_listen_socket4(int socktype, const char *ip, uint16_t port, int liste sock = socket(AF_INET, socktype, 0); if(sock < 0) { - error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + netdata_log_error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); return -1; } @@ -280,20 +280,20 @@ int create_listen_socket4(int socktype, const char *ip, uint16_t port, int liste int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr); if(ret != 1) { - error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip); + netdata_log_error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip); close(sock); return -1; } if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { close(sock); - error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + netdata_log_error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); return -1; } if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) { close(sock); - error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + netdata_log_error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); return -1; } @@ -309,7 +309,7 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p sock = socket(AF_INET6, socktype, 0); if (sock < 0) { - error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype); + netdata_log_error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype); return -1; } @@ -320,7 +320,7 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p /* IPv6 only */ if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0) - error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype); + netdata_log_error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype); struct sockaddr_in6 name; memset(&name, 0, sizeof(struct sockaddr_in6)); @@ -330,7 +330,7 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr); if(ret != 1) { - error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip); + netdata_log_error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip); close(sock); return -1; } @@ -339,13 +339,13 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) { close(sock); - error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + 
netdata_log_error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); return -1; } if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) { close(sock); - error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); + netdata_log_error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype); return -1; } @@ -355,7 +355,7 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port, int acl_flags) { if(sockets->opened >= MAX_LISTEN_FDS) { - error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype); + netdata_log_error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype); close(fd); return -1; } @@ -485,7 +485,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, protocol_str = "unix"; int fd = create_listen_socket_unix(path, listen_backlog); if (fd == -1) { - error("LISTENER: Cannot create unix socket '%s'", path); + netdata_log_error("LISTENER: Cannot create unix socket '%s'", path); sockets->failed++; } else { acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT; @@ -551,7 +551,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, if(*interface) { scope_id = if_nametoindex(interface); if(!scope_id) - error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface); + netdata_log_error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface); } if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all")) @@ -571,7 +571,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, int r = getaddrinfo(ip, port, &hints, &result); if (r != 0) { - error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r)); + netdata_log_error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r)); return -1; } @@ -608,7 +608,7 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, } if (fd == -1) { - error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport); + netdata_log_error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport); sockets->failed++; } else { @@ -630,7 +630,7 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) { long long int old_port = sockets->default_port; long long int new_port = appconfig_get_number(sockets->config, sockets->config_section, "default port", sockets->default_port); if(new_port < 1 || new_port > 65535) { - error("LISTENER: Invalid listen port %lld given. Defaulting to %lld.", new_port, old_port); + netdata_log_error("LISTENER: Invalid listen port %lld given. 
Defaulting to %lld.", new_port, old_port); sockets->default_port = (uint16_t) appconfig_set_number(sockets->config, sockets->config_section, "default port", old_port); } else sockets->default_port = (uint16_t)new_port; @@ -677,13 +677,13 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) { static inline int connect_to_unix(const char *path, struct timeval *timeout) { int fd = socket(AF_UNIX, SOCK_STREAM, 0); if(fd == -1) { - error("Failed to create UNIX socket() for '%s'", path); + netdata_log_error("Failed to create UNIX socket() for '%s'", path); return -1; } if(timeout) { if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0) - error("Failed to set timeout on UNIX socket '%s'", path); + netdata_log_error("Failed to set timeout on UNIX socket '%s'", path); } struct sockaddr_un addr; @@ -692,7 +692,7 @@ static inline int connect_to_unix(const char *path, struct timeval *timeout) { strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1); if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) { - error("Cannot connect to UNIX socket on path '%s'.", path); + netdata_log_error("Cannot connect to UNIX socket on path '%s'.", path); close(fd); return -1; } @@ -723,7 +723,7 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t int ai_err = getaddrinfo(host, service, &hints, &ai_head); if (ai_err != 0) { - error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err)); + netdata_log_error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err)); return -1; } @@ -804,7 +804,7 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t if(fd != -1) { if(timeout) { if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0) - error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr); + netdata_log_error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr); } errno = 0; @@ -828,26 +828,26 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t } else { // This means that the socket is in error. We will close it and set fd to -1 - error("Failed to connect to '%s', port '%s'.", hostBfr, servBfr); + netdata_log_error("Failed to connect to '%s', port '%s'.", hostBfr, servBfr); close(fd); fd = -1; } } else if (ret == 0) { // poll() timed out, the connection is not established within the specified timeout. - error("Timed out while connecting to '%s', port '%s'.", hostBfr, servBfr); + netdata_log_error("Timed out while connecting to '%s', port '%s'.", hostBfr, servBfr); close(fd); fd = -1; } else { // poll() returned an error. - error("Failed to connect to '%s', port '%s'. poll() returned %d", hostBfr, servBfr, ret); + netdata_log_error("Failed to connect to '%s', port '%s'. 
poll() returned %d", hostBfr, servBfr, ret); close(fd); fd = -1; } } else { - error("Failed to connect to '%s', port '%s'", hostBfr, servBfr); + netdata_log_error("Failed to connect to '%s', port '%s'", hostBfr, servBfr); close(fd); fd = -1; } @@ -933,14 +933,14 @@ int connect_to_this(const char *definition, int default_port, struct timeval *ti debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP); if(!*host) { - error("Definition '%s' does not specify a host.", definition); + netdata_log_error("Definition '%s' does not specify a host.", definition); return -1; } if(*interface) { scope_id = if_nametoindex(interface); if(!scope_id) - error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface); + netdata_log_error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface); } if(!*service) @@ -1125,7 +1125,7 @@ ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) return netdata_ssl_write(ssl, buf, len); } else { - error("cannot write to SSL connection - connection is not ready."); + netdata_log_error("cannot write to SSL connection - connection is not ready."); return -1; } } @@ -1204,7 +1204,7 @@ int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsi if (err != 0 || (err = getnameinfo((struct sockaddr *)&sadr, addrlen, client_host, (socklen_t)hostsize, NULL, 0, NI_NAMEREQD)) != 0) { - error("Incoming %s on '%s' does not match a numeric pattern, and host could not be resolved (err=%s)", + netdata_log_error("Incoming %s on '%s' does not match a numeric pattern, and host could not be resolved (err=%s)", patname, client_ip, gai_strerror(err)); if (hostsize >= 8) strcpy(client_host,"UNKNOWN"); @@ -1212,7 +1212,7 @@ int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsi } struct addrinfo *addr_infos = NULL; if (getaddrinfo(client_host, NULL, NULL, &addr_infos) !=0 ) { - error("LISTENER: cannot validate hostname '%s' from '%s' by resolving it", + netdata_log_error("LISTENER: cannot validate hostname '%s' from '%s' by resolving it", client_host, client_ip); if (hostsize >= 8) strcpy(client_host,"UNKNOWN"); @@ -1240,7 +1240,7 @@ int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsi scan = scan->ai_next; } if (!validated) { - error("LISTENER: Cannot validate '%s' as ip of '%s', not listed in DNS", client_ip, client_host); + netdata_log_error("LISTENER: Cannot validate '%s' as ip of '%s', not listed in DNS", client_ip, client_host); if (hostsize >= 8) strcpy(client_host,"UNKNOWN"); } @@ -1266,7 +1266,7 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien if (likely(nfd >= 0)) { if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { - error("LISTENER: cannot getnameinfo() on received client connection."); + netdata_log_error("LISTENER: cannot getnameinfo() on received client connection."); strncpyz(client_ip, "UNKNOWN", ipsize); strncpyz(client_port, "UNKNOWN", portsize); } @@ -1308,7 +1308,7 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien } if (!connection_allowed(nfd, client_ip, client_host, hostsize, access_list, "connection", allow_dns)) { errno = 0; - error("Permission denied for client '%s', port 
'%s'", client_ip, client_port); + netdata_log_error("Permission denied for client '%s', port '%s'", client_ip, client_port); close(nfd); nfd = -1; errno = EPERM; @@ -1316,7 +1316,7 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien } #ifdef HAVE_ACCEPT4 else if (errno == ENOSYS) - error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ..."); + netdata_log_error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ..."); #endif return nfd; @@ -1457,7 +1457,7 @@ inline void poll_close_fd(POLLINFO *pi) { if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) { if(close(pf->fd) == -1) - error("Failed to close() poll_events() socket %d", pf->fd); + netdata_log_error("Failed to close() poll_events() socket %d", pf->fd); } } @@ -1507,14 +1507,14 @@ void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) { (void)events; (void)data; - // error("POLLFD: internal error: poll_default_add_callback() called"); + // netdata_log_error("POLLFD: internal error: poll_default_add_callback() called"); return NULL; } void poll_default_del_callback(POLLINFO *pi) { if(pi->data) - error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak"); + netdata_log_error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak"); } int poll_default_rcv_callback(POLLINFO *pi, short int *events) { @@ -1528,7 +1528,7 @@ int poll_default_rcv_callback(POLLINFO *pi, short int *events) { if (rc < 0) { // read failed if (errno != EWOULDBLOCK && errno != EAGAIN) { - error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc); + netdata_log_error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc); return -1; } } else if (rc) { @@ -1565,7 +1565,7 @@ static void poll_events_cleanup(void *data) { } static int poll_process_error(POLLINFO *pi, struct pollfd *pf, short int revents) { - error("POLLFD: LISTENER: received %s %s %s on socket at slot %zu (fd %d) client '%s' port '%s' expecting %s %s %s, having %s %s %s" + netdata_log_error("POLLFD: LISTENER: received %s %s %s on socket at slot %zu (fd %d) client '%s' port '%s' expecting %s %s %s, having %s %s %s" , revents & POLLERR ? "POLLERR" : "" , revents & POLLHUP ? "POLLHUP" : "" , revents & POLLNVAL ? 
"POLLNVAL" : "" @@ -1673,7 +1673,7 @@ static int poll_process_new_tcp_connection(POLLJOB *p, POLLINFO *pi, struct poll p->used, p->limit); } else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN)) - error("POLLFD: LISTENER: accept() failed."); + netdata_log_error("POLLFD: LISTENER: accept() failed."); } else { @@ -1720,7 +1720,7 @@ void poll_events(LISTEN_SOCKETS *sockets , size_t max_tcp_sockets ) { if(!sockets || !sockets->opened) { - error("POLLFD: internal error: no listening sockets are opened"); + netdata_log_error("POLLFD: internal error: no listening sockets are opened"); return; } @@ -1827,7 +1827,7 @@ void poll_events(LISTEN_SOCKETS *sockets time_t now = now_boottime_sec(); if(unlikely(retval == -1)) { - error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1); + netdata_log_error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1); break; } else if(unlikely(!retval)) { @@ -1885,7 +1885,7 @@ void poll_events(LISTEN_SOCKETS *sockets conns[conns_max++] = i; } else - error("POLLFD: LISTENER: server slot %zu (fd %d) connection from %s port %s using unhandled socket type %d." + netdata_log_error("POLLFD: LISTENER: server slot %zu (fd %d) connection from %s port %s using unhandled socket type %d." , i , pi->fd , pi->client_ip ? pi->client_ip : "<undefined-ip>" @@ -1894,7 +1894,7 @@ void poll_events(LISTEN_SOCKETS *sockets ); } else - error("POLLFD: LISTENER: client slot %zu (fd %d) data from %s port %s using flags %08X is neither client nor server." + netdata_log_error("POLLFD: LISTENER: client slot %zu (fd %d) data from %s port %s using flags %08X is neither client nor server." , i , pi->fd , pi->client_ip ? pi->client_ip : "<undefined-ip>" @@ -1903,7 +1903,7 @@ void poll_events(LISTEN_SOCKETS *sockets ); } else - error("POLLFD: LISTENER: socket slot %zu (fd %d) client %s port %s unhandled event id %d." + netdata_log_error("POLLFD: LISTENER: socket slot %zu (fd %d) client %s port %s unhandled event id %d." , i , pi->fd , pi->client_ip ? pi->client_ip : "<undefined-ip>" diff --git a/libnetdata/storage_number/storage_number.c b/libnetdata/storage_number/storage_number.c index ca1eb43aab..6468951bd0 100644 --- a/libnetdata/storage_number/storage_number.c +++ b/libnetdata/storage_number/storage_number.c @@ -121,7 +121,7 @@ storage_number pack_storage_number(NETDATA_DOUBLE value, SN_FLAGS flags) { if(n > (NETDATA_DOUBLE)0x00ffffff) { #ifdef NETDATA_INTERNAL_CHECKS - error("Number " NETDATA_DOUBLE_FORMAT " is too big.", value); + netdata_log_error("Number " NETDATA_DOUBLE_FORMAT " is too big.", value); #endif r += 0x00ffffff; return r; diff --git a/libnetdata/string/string.c b/libnetdata/string/string.c index 9385aa6e8d..159f8a6a84 100644 --- a/libnetdata/string/string.c +++ b/libnetdata/string/string.c @@ -240,7 +240,7 @@ static inline void string_index_delete(STRING *string) { JError_t J_Error; int ret = JudyHSDel(&string_base.JudyHSArray, (void *)string->str, string->length, &J_Error); if (unlikely(ret == JERR)) { - error( + netdata_log_error( "STRING: Cannot delete entry with name '%s' from JudyHS, JU_ERRNO_* == %u, ID == %d", string->str, JU_ERRNO(&J_Error), @@ -250,7 +250,7 @@ static inline void string_index_delete(STRING *string) { } if (unlikely(!deleted)) - error("STRING: tried to delete '%s' that is not in the index. Ignoring it.", string->str); + netdata_log_error("STRING: tried to delete '%s' that is not in the index. 
Ignoring it.", string->str); else { size_t mem_size = sizeof(STRING) + string->length; string_base.deletes++; diff --git a/libnetdata/threads/threads.c b/libnetdata/threads/threads.c index 5b1adb0e88..1ec6864477 100644 --- a/libnetdata/threads/threads.c +++ b/libnetdata/threads/threads.c @@ -150,12 +150,12 @@ void netdata_threads_init_after_fork(size_t stacksize) { if(netdata_threads_attr && stacksize > (size_t)PTHREAD_STACK_MIN) { i = pthread_attr_setstacksize(netdata_threads_attr, stacksize); if(i != 0) - error("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", stacksize, i); + netdata_log_error("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", stacksize, i); else netdata_log_info("Set threads stack size to %zu bytes", stacksize); } else - error("Invalid pthread stacksize %zu", stacksize); + netdata_log_error("Invalid pthread stacksize %zu", stacksize); } // ---------------------------------------------------------------------------- @@ -169,7 +169,7 @@ void service_exits(void); static void thread_cleanup(void *ptr) { if(netdata_thread != ptr) { NETDATA_THREAD *info = (NETDATA_THREAD *)ptr; - error("THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag); + netdata_log_error("THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag); } if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP)) @@ -205,7 +205,7 @@ static void thread_set_name_np(NETDATA_THREAD *nt) { #endif if (ret != 0) - error("cannot set pthread name of %d to %s. ErrCode: %d", gettid(), threadname, ret); + netdata_log_error("cannot set pthread name of %d to %s. ErrCode: %d", gettid(), threadname, ret); else netdata_log_info("set name of thread %d to %s", gettid(), threadname); @@ -250,10 +250,10 @@ static void *netdata_thread_init(void *ptr) { netdata_log_info("thread created with task id %d", gettid()); if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0) - error("cannot set pthread cancel type to DEFERRED."); + netdata_log_error("cannot set pthread cancel type to DEFERRED."); if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0) - error("cannot set pthread cancel state to ENABLE."); + netdata_log_error("cannot set pthread cancel state to ENABLE."); thread_set_name_np(ptr); @@ -275,13 +275,13 @@ int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THR int ret = pthread_create(thread, netdata_threads_attr, netdata_thread_init, info); if(ret != 0) - error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret); + netdata_log_error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret); else { if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) { int ret2 = pthread_detach(*thread); if (ret2 != 0) - error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2); + netdata_log_error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2); } } @@ -298,9 +298,9 @@ int netdata_thread_cancel(netdata_thread_t thread) { int ret = pthread_cancel(thread); if(ret != 0) #ifdef NETDATA_INTERNAL_CHECKS - error("cannot cancel thread. pthread_cancel() failed with code %d at %d@%s, function %s()", ret, line, file, function); + netdata_log_error("cannot cancel thread. 
pthread_cancel() failed with code %d at %d@%s, function %s()", ret, line, file, function); #else - error("cannot cancel thread. pthread_cancel() failed with code %d.", ret); + netdata_log_error("cannot cancel thread. pthread_cancel() failed with code %d.", ret); #endif return ret; @@ -312,7 +312,7 @@ int netdata_thread_cancel(netdata_thread_t thread) { int netdata_thread_join(netdata_thread_t thread, void **retval) { int ret = pthread_join(thread, retval); if(ret != 0) - error("cannot join thread. pthread_join() failed with code %d.", ret); + netdata_log_error("cannot join thread. pthread_join() failed with code %d.", ret); return ret; } @@ -320,7 +320,7 @@ int netdata_thread_join(netdata_thread_t thread, void **retval) { int netdata_thread_detach(pthread_t thread) { int ret = pthread_detach(thread); if(ret != 0) - error("cannot detach thread. pthread_detach() failed with code %d.", ret); + netdata_log_error("cannot detach thread. pthread_detach() failed with code %d.", ret); return ret; } diff --git a/libnetdata/worker_utilization/worker_utilization.c b/libnetdata/worker_utilization/worker_utilization.c index c862064194..ad45dbc7f1 100644 --- a/libnetdata/worker_utilization/worker_utilization.c +++ b/libnetdata/worker_utilization/worker_utilization.c @@ -118,7 +118,7 @@ void worker_register_job_custom_metric(size_t job_id, const char *name, const ch if(unlikely(!worker)) return; if(unlikely(job_id >= WORKER_UTILIZATION_MAX_JOB_TYPES)) { - error("WORKER_UTILIZATION: job_id %zu is too big. Max is %zu", job_id, (size_t)(WORKER_UTILIZATION_MAX_JOB_TYPES - 1)); + netdata_log_error("WORKER_UTILIZATION: job_id %zu is too big. Max is %zu", job_id, (size_t)(WORKER_UTILIZATION_MAX_JOB_TYPES - 1)); return; } @@ -127,7 +127,7 @@ void worker_register_job_custom_metric(size_t job_id, const char *name, const ch if(worker->per_job_type[job_id].name) { if(strcmp(string2str(worker->per_job_type[job_id].name), name) != 0 || worker->per_job_type[job_id].type != type || strcmp(string2str(worker->per_job_type[job_id].units), units) != 0) - error("WORKER_UTILIZATION: duplicate job registration: worker '%s' job id %zu is '%s', ignoring the later '%s'", worker->workname, job_id, string2str(worker->per_job_type[job_id].name), name); + netdata_log_error("WORKER_UTILIZATION: duplicate job registration: worker '%s' job id %zu is '%s', ignoring the later '%s'", worker->workname, job_id, string2str(worker->per_job_type[job_id].name), name); return; } diff --git a/ml/Config.cc b/ml/Config.cc index c00d2e8ee3..f733bcf7d9 100644 --- a/ml/Config.cc +++ b/ml/Config.cc @@ -83,7 +83,7 @@ void ml_config_load(ml_config_t *cfg) { */ if (min_train_samples >= max_train_samples) { - error("invalid min/max train samples found (%u >= %u)", min_train_samples, max_train_samples); + netdata_log_error("invalid min/max train samples found (%u >= %u)", min_train_samples, max_train_samples); min_train_samples = 1 * 3600; max_train_samples = 6 * 3600; diff --git a/ml/ml.cc b/ml/ml.cc index 0e09b3612a..3969674923 100644 --- a/ml/ml.cc +++ b/ml/ml.cc @@ -1390,7 +1390,7 @@ void ml_host_get_models(RRDHOST *rh, BUFFER *wb) UNUSED(wb); // TODO: To be implemented - error("Fetching KMeans models is not supported yet"); + netdata_log_error("Fetching KMeans models is not supported yet"); } void ml_chart_new(RRDSET *rs) @@ -1519,11 +1519,11 @@ static void ml_flush_pending_models(ml_training_thread_t *training_thread) { // try to rollback transaction if we got any failures if (rc) { - error("Trying to rollback ML transaction because it failed with 
rc=%d, op_no=%d", rc, op_no); + netdata_log_error("Trying to rollback ML transaction because it failed with rc=%d, op_no=%d", rc, op_no); op_no++; rc = db_execute(db, "ROLLBACK;"); if (rc) - error("ML transaction rollback failed with rc=%d", rc); + netdata_log_error("ML transaction rollback failed with rc=%d", rc); } training_thread->pending_model_info.clear(); diff --git a/registry/registry_db.c b/registry/registry_db.c index ae74aa5304..f0586ab21a 100644 --- a/registry/registry_db.c +++ b/registry/registry_db.c @@ -122,7 +122,7 @@ int registry_db_save(void) { debug(D_REGISTRY, "Registry: Creating file '%s'", tmp_filename); FILE *fp = fopen(tmp_filename, "w"); if(!fp) { - error("Registry: Cannot create file: %s", tmp_filename); + netdata_log_error("Registry: Cannot create file: %s", tmp_filename); error_log_limit_reset(); return -1; } @@ -132,7 +132,7 @@ int registry_db_save(void) { debug(D_REGISTRY, "Saving all machines"); int bytes1 = dictionary_walkthrough_read(registry.machines, registry_machine_save, fp); if(bytes1 < 0) { - error("Registry: Cannot save registry machines - return value %d", bytes1); + netdata_log_error("Registry: Cannot save registry machines - return value %d", bytes1); fclose(fp); error_log_limit_reset(); return bytes1; @@ -142,7 +142,7 @@ int registry_db_save(void) { debug(D_REGISTRY, "Saving all persons"); int bytes2 = dictionary_walkthrough_read(registry.persons, registry_person_save, fp); if(bytes2 < 0) { - error("Registry: Cannot save registry persons - return value %d", bytes2); + netdata_log_error("Registry: Cannot save registry persons - return value %d", bytes2); fclose(fp); error_log_limit_reset(); return bytes2; @@ -166,34 +166,34 @@ int registry_db_save(void) { // remove the .old db debug(D_REGISTRY, "Registry: Removing old db '%s'", old_filename); if(unlink(old_filename) == -1 && errno != ENOENT) - error("Registry: cannot remove old registry file '%s'", old_filename); + netdata_log_error("Registry: cannot remove old registry file '%s'", old_filename); // rename the db to .old debug(D_REGISTRY, "Registry: Link current db '%s' to .old: '%s'", registry.db_filename, old_filename); if(link(registry.db_filename, old_filename) == -1 && errno != ENOENT) - error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", registry.db_filename, old_filename); + netdata_log_error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", registry.db_filename, old_filename); else { // remove the database (it is saved in .old) debug(D_REGISTRY, "Registry: removing db '%s'", registry.db_filename); if (unlink(registry.db_filename) == -1 && errno != ENOENT) - error("Registry: cannot remove old registry file '%s'", registry.db_filename); + netdata_log_error("Registry: cannot remove old registry file '%s'", registry.db_filename); // move the .tmp to make it active debug(D_REGISTRY, "Registry: linking tmp db '%s' to active db '%s'", tmp_filename, registry.db_filename); if (link(tmp_filename, registry.db_filename) == -1) { - error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename, + netdata_log_error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename, registry.db_filename); // move the .old back debug(D_REGISTRY, "Registry: linking old db '%s' to active db '%s'", old_filename, registry.db_filename); if(link(old_filename, registry.db_filename) == -1) - error("Registry: cannot move file '%s' to '%s'. 
Recovering the old registry DB failed!", old_filename, registry.db_filename); + netdata_log_error("Registry: cannot move file '%s' to '%s'. Recovering the old registry DB failed!", old_filename, registry.db_filename); } else { debug(D_REGISTRY, "Registry: removing tmp db '%s'", tmp_filename); if(unlink(tmp_filename) == -1) - error("Registry: cannot remove tmp registry file '%s'", tmp_filename); + netdata_log_error("Registry: cannot remove tmp registry file '%s'", tmp_filename); // it has been moved successfully // discard the current registry log @@ -221,7 +221,7 @@ size_t registry_db_load(void) { debug(D_REGISTRY, "Registry: loading active db from: '%s'", registry.db_filename); FILE *fp = fopen(registry.db_filename, "r"); if(!fp) { - error("Registry: cannot open registry file: '%s'", registry.db_filename); + netdata_log_error("Registry: cannot open registry file: '%s'", registry.db_filename); return 0; } @@ -234,7 +234,7 @@ size_t registry_db_load(void) { switch(*s) { case 'T': // totals if(unlikely(len != 103 || s[1] != '\t' || s[18] != '\t' || s[35] != '\t' || s[52] != '\t' || s[69] != '\t' || s[86] != '\t' || s[103] != '\0')) { - error("Registry totals line %zu is wrong (len = %zu).", line, len); + netdata_log_error("Registry totals line %zu is wrong (len = %zu).", line, len); continue; } registry.persons_count = strtoull(&s[2], NULL, 16); @@ -249,7 +249,7 @@ size_t registry_db_load(void) { m = NULL; // verify it is valid if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) { - error("Registry person line %zu is wrong (len = %zu).", line, len); + netdata_log_error("Registry person line %zu is wrong (len = %zu).", line, len); continue; } @@ -264,7 +264,7 @@ size_t registry_db_load(void) { p = NULL; // verify it is valid if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) { - error("Registry person line %zu is wrong (len = %zu).", line, len); + netdata_log_error("Registry person line %zu is wrong (len = %zu).", line, len); continue; } @@ -277,13 +277,13 @@ size_t registry_db_load(void) { case 'U': // person URL if(unlikely(!p)) { - error("Registry: ignoring line %zu, no person loaded: %s", line, s); + netdata_log_error("Registry: ignoring line %zu, no person loaded: %s", line, s); continue; } // verify it is valid if(len < 69 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t' || s[68] != '\t') { - error("Registry person URL line %zu is wrong (len = %zu).", line, len); + netdata_log_error("Registry person URL line %zu is wrong (len = %zu).", line, len); continue; } @@ -293,7 +293,7 @@ size_t registry_db_load(void) { char *url = &s[69]; while(*url && *url != '\t') url++; if(!*url) { - error("Registry person URL line %zu does not have a url.", line); + netdata_log_error("Registry person URL line %zu does not have a url.", line); continue; } *url++ = '\0'; @@ -315,13 +315,13 @@ size_t registry_db_load(void) { case 'V': // machine URL if(unlikely(!m)) { - error("Registry: ignoring line %zu, no machine loaded: %s", line, s); + netdata_log_error("Registry: ignoring line %zu, no machine loaded: %s", line, s); continue; } // verify it is valid if(len < 32 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t') { - error("Registry person URL line %zu is wrong (len = %zu).", line, len); + netdata_log_error("Registry person URL line %zu is wrong (len = %zu).", line, len); continue; } @@ -337,7 +337,7 @@ size_t 
registry_db_load(void) { break; default: - error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s); + netdata_log_error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s); break; } } diff --git a/registry/registry_internals.c b/registry/registry_internals.c index 59a0db82b0..d034ee231a 100644 --- a/registry/registry_internals.c +++ b/registry/registry_internals.c @@ -270,7 +270,7 @@ static inline int is_machine_guid_blacklisted(const char *guid) { if(!strcmp(guid, "8a795b0c-2311-11e6-8563-000c295076a6") || !strcmp(guid, "4aed1458-1c3e-11e6-a53f-000c290fc8f5") ) { - error("Blacklisted machine GUID '%s' found.", guid); + netdata_log_error("Blacklisted machine GUID '%s' found.", guid); return 1; } @@ -292,11 +292,11 @@ char *registry_get_this_machine_guid(void) { if(fd != -1) { char buf[GUID_LEN + 1]; if(read(fd, buf, GUID_LEN) != GUID_LEN) - error("Failed to read machine GUID from '%s'", registry.machine_guid_filename); + netdata_log_error("Failed to read machine GUID from '%s'", registry.machine_guid_filename); else { buf[GUID_LEN] = '\0'; if(regenerate_guid(buf, guid) == -1) { - error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.", + netdata_log_error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.", buf, registry.machine_guid_filename); guid[0] = '\0'; diff --git a/registry/registry_log.c b/registry/registry_log.c index b048135e60..4309d40631 100644 --- a/registry/registry_log.c +++ b/registry/registry_log.c @@ -12,7 +12,7 @@ void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY m->guid, name, u->url) < 0)) - error("Registry: failed to save log. Registry data may be lost in case of abnormal restart."); + netdata_log_error("Registry: failed to save log. Registry data may be lost in case of abnormal restart."); // we increase the counter even on failures // so that the registry will be saved periodically @@ -33,11 +33,11 @@ int registry_log_open(void) { registry.log_fp = fopen(registry.log_filename, "a"); if(registry.log_fp) { if (setvbuf(registry.log_fp, NULL, _IOLBF, 0) != 0) - error("Cannot set line buffering on registry log file."); + netdata_log_error("Cannot set line buffering on registry log file."); return 0; } - error("Cannot open registry log file '%s'. Registry data will be lost in case of netdata or server crash.", registry.log_filename); + netdata_log_error("Cannot open registry log file '%s'. 
Registry data will be lost in case of netdata or server crash.", registry.log_filename); return -1; } @@ -55,7 +55,8 @@ void registry_log_recreate(void) { // open it with truncate registry.log_fp = fopen(registry.log_filename, "w"); if(registry.log_fp) fclose(registry.log_fp); - else error("Cannot truncate registry log '%s'", registry.log_filename); + else + netdata_log_error("Cannot truncate registry log '%s'", registry.log_filename); registry.log_fp = NULL; registry_log_open(); @@ -72,7 +73,7 @@ ssize_t registry_log_load(void) { debug(D_REGISTRY, "Registry: loading active db from: %s", registry.log_filename); FILE *fp = fopen(registry.log_filename, "r"); if(!fp) - error("Registry: cannot open registry file: %s", registry.log_filename); + netdata_log_error("Registry: cannot open registry file: %s", registry.log_filename); else { char *s, buf[4096 + 1]; line = 0; @@ -87,7 +88,7 @@ ssize_t registry_log_load(void) { // verify it is valid if (unlikely(len < 85 || s[1] != '\t' || s[10] != '\t' || s[47] != '\t' || s[84] != '\t')) { - error("Registry: log line %zd is wrong (len = %zu).", line, len); + netdata_log_error("Registry: log line %zd is wrong (len = %zu).", line, len); continue; } s[1] = s[10] = s[47] = s[84] = '\0'; @@ -102,7 +103,7 @@ ssize_t registry_log_load(void) { char *url = name; while(*url && *url != '\t') url++; if(!*url) { - error("Registry: log line %zd does not have a url.", line); + netdata_log_error("Registry: log line %zd does not have a url.", line); continue; } *url++ = '\0'; @@ -121,7 +122,7 @@ ssize_t registry_log_load(void) { break; default: - error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s); + netdata_log_error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s); break; } } diff --git a/registry/registry_person.c b/registry/registry_person.c index e16eabef0e..6ae0793b6d 100644 --- a/registry/registry_person.c +++ b/registry/registry_person.c @@ -34,7 +34,7 @@ inline REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, RE debug(D_REGISTRY, "Registry: registry_person_url_index_add('%s', '%s')", p->guid, pu->url->url); REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_insert(&(p->person_urls), (avl_t *)(pu)); if(tpu != pu) - error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url); + netdata_log_error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url); return tpu; } @@ -43,9 +43,9 @@ inline REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, RE debug(D_REGISTRY, "Registry: registry_person_url_index_del('%s', '%s')", p->guid, pu->url->url); REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_remove(&(p->person_urls), (avl_t *)(pu)); if(!tpu) - error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url); + netdata_log_error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url); else if(tpu != pu) - error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url); + netdata_log_error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url); return tpu; } @@ -78,7 +78,7 @@ REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_M debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): indexing URL in 
person", p->guid, m->guid, u->url); REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu); if(tpu != pu) { - error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid); + netdata_log_error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid); freez(pu); pu = tpu; } diff --git a/registry/registry_url.c b/registry/registry_url.c index 699e5e6800..b197b2254e 100644 --- a/registry/registry_url.c +++ b/registry/registry_url.c @@ -50,7 +50,7 @@ REGISTRY_URL *registry_url_get(const char *url, size_t urllen) { debug(D_REGISTRY, "Registry: registry_url_get('%s'): indexing it", url); n = registry_url_index_add(u); if(n != u) { - error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url); + netdata_log_error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url); freez(u); u = n; } @@ -72,11 +72,11 @@ void registry_url_unlink(REGISTRY_URL *u) { debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): No more links for this URL", u->url); REGISTRY_URL *n = registry_url_index_del(u); if(!n) { - error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url); + netdata_log_error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url); } else { if(n != u) { - error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url); + netdata_log_error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url); } registry.urls_memory -= sizeof(REGISTRY_URL) + n->len; // no need for +1, 1 is already in REGISTRY_URL diff --git a/spawn/spawn.c b/spawn/spawn.c index 87e204dc46..a9359da04e 100644 --- a/spawn/spawn.c +++ b/spawn/spawn.c @@ -219,7 +219,7 @@ int create_spawn_server(uv_loop_t *loop, uv_pipe_t *spawn_channel, uv_process_t ret = uv_spawn(loop, process, &options); /* execute the netdata binary again as the netdata user */ if (0 != ret) { - error("uv_spawn (process: \"%s\") (user: %s) failed (%s).", exepath, user, uv_strerror(ret)); + netdata_log_error("uv_spawn (process: \"%s\") (user: %s) failed (%s).", exepath, user, uv_strerror(ret)); fatal("Cannot start netdata without the spawn server."); } @@ -242,7 +242,7 @@ void spawn_init(void) completion_init(&completion); error = uv_thread_create(&thread, spawn_client, &completion); if (error) { - error("uv_thread_create(): %s", uv_strerror(error)); + netdata_log_error("uv_thread_create(): %s", uv_strerror(error)); goto after_error; } /* wait for spawn client thread to initialize */ @@ -253,7 +253,7 @@ void spawn_init(void) if (spawn_thread_error) { error = uv_thread_join(&thread); if (error) { - error("uv_thread_create(): %s", uv_strerror(error)); + netdata_log_error("uv_thread_create(): %s", uv_strerror(error)); } goto after_error; } @@ -285,5 +285,5 @@ void spawn_init(void) return; after_error: - error("Failed to initialize spawn service. The alarms notifications will not be spawned."); + netdata_log_error("Failed to initialize spawn service. 
The alarms notifications will not be spawned."); } diff --git a/spawn/spawn_client.c b/spawn/spawn_client.c index 6a512e07d6..8928a468c6 100644 --- a/spawn/spawn_client.c +++ b/spawn/spawn_client.c @@ -106,7 +106,7 @@ static void on_pipe_read(uv_stream_t* pipe, ssize_t nread, const uv_buf_t* buf) } else if (UV_EOF == nread) { netdata_log_info("EOF found in spawn pipe."); } else if (nread < 0) { - error("%s: %s", __func__, uv_strerror(nread)); + netdata_log_error("%s: %s", __func__, uv_strerror(nread)); } if (nread < 0) { /* stop stream due to EOF or error */ @@ -176,7 +176,7 @@ void spawn_client(void *arg) loop = mallocz(sizeof(uv_loop_t)); ret = uv_loop_init(loop); if (ret) { - error("uv_loop_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_loop_init(): %s", uv_strerror(ret)); spawn_thread_error = ret; goto error_after_loop_init; } @@ -185,14 +185,14 @@ void spawn_client(void *arg) spawn_async.data = NULL; ret = uv_async_init(loop, &spawn_async, async_cb); if (ret) { - error("uv_async_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_async_init(): %s", uv_strerror(ret)); spawn_thread_error = ret; goto error_after_async_init; } ret = uv_pipe_init(loop, &spawn_channel, 1); if (ret) { - error("uv_pipe_init(): %s", uv_strerror(ret)); + netdata_log_error("uv_pipe_init(): %s", uv_strerror(ret)); spawn_thread_error = ret; goto error_after_pipe_init; } @@ -200,7 +200,7 @@ void spawn_client(void *arg) ret = create_spawn_server(loop, &spawn_channel, &process); if (ret) { - error("Failed to fork spawn server process."); + netdata_log_error("Failed to fork spawn server process."); spawn_thread_error = ret; goto error_after_spawn_server; } diff --git a/streaming/compression.c b/streaming/compression.c index 5a19f46a68..b4df47f962 100644 --- a/streaming/compression.c +++ b/streaming/compression.c @@ -52,7 +52,7 @@ size_t rrdpush_compress(struct compressor_state *state, const char *data, size_t return 0; if(unlikely(size > COMPRESSION_MAX_MSG_SIZE)) { - error("RRDPUSH COMPRESS: Compression Failed - Message size %lu above compression buffer limit: %d", + netdata_log_error("RRDPUSH COMPRESS: Compression Failed - Message size %lu above compression buffer limit: %d", (long unsigned int)size, COMPRESSION_MAX_MSG_SIZE); return 0; } @@ -83,7 +83,7 @@ size_t rrdpush_compress(struct compressor_state *state, const char *data, size_t 1); if (compressed_data_size < 0) { - error("Data compression error: %ld", compressed_data_size); + netdata_log_error("Data compression error: %ld", compressed_data_size); return 0; } @@ -125,7 +125,7 @@ size_t rrdpush_decompress(struct decompressor_state *state, const char *compress ); if (unlikely(decompressed_size < 0)) { - error("RRDPUSH DECOMPRESS: decompressor returned negative decompressed bytes: %ld", decompressed_size); + netdata_log_error("RRDPUSH DECOMPRESS: decompressor returned negative decompressed bytes: %ld", decompressed_size); return 0; } diff --git a/streaming/receiver.c b/streaming/receiver.c index e7233f6097..395fcb1945 100644 --- a/streaming/receiver.c +++ b/streaming/receiver.c @@ -77,15 +77,15 @@ static inline int read_stream(struct receiver_state *r, char* buffer, size_t siz } while(bytes_read < 0 && errno == EINTR && tries--); if((bytes_read == 0 || bytes_read == -1) && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS)) { - error("STREAM: %s(): timeout while waiting for data on socket!", __FUNCTION__); + netdata_log_error("STREAM: %s(): timeout while waiting for data on socket!", __FUNCTION__); bytes_read = -3; } else if 
(bytes_read == 0) { - error("STREAM: %s(): EOF while reading data from socket!", __FUNCTION__); + netdata_log_error("STREAM: %s(): EOF while reading data from socket!", __FUNCTION__); bytes_read = -1; } else if (bytes_read < 0) { - error("STREAM: %s() failed to read from socket!", __FUNCTION__); + netdata_log_error("STREAM: %s() failed to read from socket!", __FUNCTION__); bytes_read = -2; } @@ -170,7 +170,7 @@ static inline bool receiver_read_compressed(struct receiver_state *r) { } if(unlikely(compressed_message_size > COMPRESSION_MAX_MSG_SIZE)) { - error("received a compressed message of %zu bytes, which is bigger than the max compressed message size supported of %zu. Ignoring message.", + netdata_log_error("received a compressed message of %zu bytes, which is bigger than the max compressed message size supported of %zu. Ignoring message.", compressed_message_size, (size_t)COMPRESSION_MAX_MSG_SIZE); return false; } @@ -259,7 +259,7 @@ inline char *buffered_reader_next_line(struct buffered_reader *reader, char *dst // if the destination is full, oops! if(ds == de) { - error("STREAM: received line exceeds %d bytes. Truncating it.", PLUGINSD_LINE_MAX); + netdata_log_error("STREAM: received line exceeds %d bytes. Truncating it.", PLUGINSD_LINE_MAX); *ds = '\0'; reader->pos = ss - reader->read_buffer; return dst; @@ -498,7 +498,7 @@ bool stop_streaming_receiver(RRDHOST *host, STREAM_HANDSHAKE reason) { } if(host->receiver) - error("STREAM '%s' [receive from [%s]:%s]: " + netdata_log_error("STREAM '%s' [receive from [%s]:%s]: " "thread %d takes too long to stop, giving up..." , rrdhost_hostname(host) , host->receiver->client_ip, host->receiver->client_port @@ -573,7 +573,7 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->machine_guid, "memory mode", rrd_memory_mode_name(rpt->config.mode))); if (unlikely(rpt->config.mode == RRD_MEMORY_MODE_DBENGINE && !dbengine_enabled)) { - error("STREAM '%s' [receive from %s:%s]: " + netdata_log_error("STREAM '%s' [receive from %s:%s]: " "dbengine is not enabled, falling back to default." 
, rpt->hostname , rpt->client_ip, rpt->client_port @@ -751,7 +751,7 @@ static void rrdpush_receive(struct receiver_state *rpt) { // remove the non-blocking flag from the socket if(sock_delnonblock(rpt->fd) < 0) - error("STREAM '%s' [receive from [%s]:%s]: " + netdata_log_error("STREAM '%s' [receive from [%s]:%s]: " "cannot remove the non-blocking flag from socket %d" , rrdhost_hostname(rpt->host) , rpt->client_ip, rpt->client_port @@ -761,7 +761,7 @@ static void rrdpush_receive(struct receiver_state *rpt) timeout.tv_sec = 600; timeout.tv_usec = 0; if (unlikely(setsockopt(rpt->fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof timeout) != 0)) - error("STREAM '%s' [receive from [%s]:%s]: " + netdata_log_error("STREAM '%s' [receive from [%s]:%s]: " "cannot set timeout for socket %d" , rrdhost_hostname(rpt->host) , rpt->client_ip, rpt->client_port diff --git a/streaming/replication.c b/streaming/replication.c index cf4202f0b5..0e5a0b4071 100644 --- a/streaming/replication.c +++ b/streaming/replication.c @@ -799,7 +799,7 @@ static bool send_replay_chart_cmd(struct replication_request_details *r, const c ssize_t ret = r->caller.callback(buffer, r->caller.data); if (ret < 0) { - error("REPLAY ERROR: 'host:%s/chart:%s' failed to send replication request to child (error %zd)", + netdata_log_error("REPLAY ERROR: 'host:%s/chart:%s' failed to send replication request to child (error %zd)", rrdhost_hostname(r->host), rrdset_id(r->st), ret); return false; } @@ -1860,7 +1860,7 @@ void *replication_thread_main(void *ptr __maybe_unused) { int threads = config_get_number(CONFIG_SECTION_DB, "replication threads", 1); if(threads < 1 || threads > MAX_REPLICATION_THREADS) { - error("replication threads given %d is invalid, resetting to 1", threads); + netdata_log_error("replication threads given %d is invalid, resetting to 1", threads); threads = 1; } diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c index 29d5a981b7..f097ce2d2d 100644 --- a/streaming/rrdpush.c +++ b/streaming/rrdpush.c @@ -148,7 +148,7 @@ int rrdpush_init() { #endif if(default_rrdpush_enabled && (!default_rrdpush_destination || !*default_rrdpush_destination || !default_rrdpush_api_key || !*default_rrdpush_api_key)) { - error("STREAM [send]: cannot enable sending thread - information is missing."); + netdata_log_error("STREAM [send]: cannot enable sending thread - information is missing."); default_rrdpush_enabled = 0; } @@ -479,7 +479,7 @@ RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock if(unlikely(!(host_flags & RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS))) { rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_LOGGED_STATUS); - error("STREAM %s [send]: not ready - collected metrics are not sent to parent.", rrdhost_hostname(host)); + netdata_log_error("STREAM %s [send]: not ready - collected metrics are not sent to parent.", rrdhost_hostname(host)); } return (RRDSET_STREAM_BUFFER) { .wb = NULL, }; @@ -727,7 +727,7 @@ static void rrdpush_sender_thread_spawn(RRDHOST *host) { snprintfz(tag, NETDATA_THREAD_TAG_MAX, THREAD_TAG_STREAM_SENDER "[%s]", rrdhost_hostname(host)); if(netdata_thread_create(&host->rrdpush_sender_thread, tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_sender_thread, (void *) host->sender)) - error("STREAM %s [send]: failed to create new thread for client.", rrdhost_hostname(host)); + netdata_log_error("STREAM %s [send]: failed to create new thread for client.", rrdhost_hostname(host)); else rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN); } @@ -1075,7 +1075,7 @@ int 
rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
#endif
rpt->fd, initial_response, strlen(initial_response), 0, 60) != (ssize_t)strlen(initial_response)) {
- error("STREAM '%s' [receive from [%s]:%s]: "
+ netdata_log_error("STREAM '%s' [receive from [%s]:%s]: "
"failed to reply."
, rpt->hostname
, rpt->client_ip, rpt->client_port
diff --git a/streaming/sender.c b/streaming/sender.c
index 68a1d3885f..07cb5dd10e 100644
--- a/streaming/sender.c
+++ b/streaming/sender.c
@@ -74,9 +74,9 @@ static inline void rrdpush_sender_thread_close_socket(RRDHOST *host);
*/
static inline void deactivate_compression(struct sender_state *s) {
worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_NO_COMPRESSION);
- error("STREAM_COMPRESSION: Compression returned error, disabling it.");
+ netdata_log_error("STREAM_COMPRESSION: Compression returned error, disabling it.");
s->flags &= ~SENDER_FLAG_COMPRESSION;
- error("STREAM %s [send to %s]: Restarting connection without compression.", rrdhost_hostname(s->host), s->connected_to);
+ netdata_log_error("STREAM %s [send to %s]: Restarting connection without compression.", rrdhost_hostname(s->host), s->connected_to);
rrdpush_sender_thread_close_socket(s->host);
}
#endif
@@ -146,13 +146,13 @@ void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type)
char *dst;
size_t dst_len = rrdpush_compress(&s->compressor, src, size_to_compress, &dst);
if (!dst_len) {
- error("STREAM %s [send to %s]: COMPRESSION failed. Resetting compressor and re-trying",
+ netdata_log_error("STREAM %s [send to %s]: COMPRESSION failed. Resetting compressor and re-trying",
rrdhost_hostname(s->host), s->connected_to);
rrdpush_compressor_reset(&s->compressor);
dst_len = rrdpush_compress(&s->compressor, src, size_to_compress, &dst);
if(!dst_len) {
- error("STREAM %s [send to %s]: COMPRESSION failed again. Deactivating compression",
+ netdata_log_error("STREAM %s [send to %s]: COMPRESSION failed again. Deactivating compression",
rrdhost_hostname(s->host), s->connected_to);
deactivate_compression(s);
@@ -509,7 +509,7 @@ static inline bool rrdpush_sender_validate_response(RRDHOST *host, struct sender
char buf[LOG_DATE_LENGTH];
log_date(buf, LOG_DATE_LENGTH, host->destination->postpone_reconnection_until);
- error("STREAM %s [send to %s]: %s - will retry in %ld secs, at %s",
+ netdata_log_error("STREAM %s [send to %s]: %s - will retry in %ld secs, at %s",
rrdhost_hostname(host), s->connected_to, error, delay, buf);
return false;
@@ -541,7 +541,7 @@ static bool rrdpush_sender_connect_ssl(struct sender_state *s) {
// certificate is not valid
worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
- error("SSL: closing the stream connection, because the server SSL certificate is not valid.");
+ netdata_log_error("SSL: closing the stream connection, because the server SSL certificate is not valid.");
rrdpush_sender_thread_close_socket(host);
host->destination->reason = STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE;
host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60;
@@ -551,7 +551,7 @@ static bool rrdpush_sender_connect_ssl(struct sender_state *s) {
return true;
}
- error("SSL: failed to establish connection.");
+ netdata_log_error("SSL: failed to establish connection.");
return false;
#else
@@ -581,7 +581,7 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
);
if(unlikely(s->rrdpush_sender_socket == -1)) {
- // error("STREAM %s [send to %s]: could not connect to parent node at this time.", rrdhost_hostname(host), host->rrdpush_send_destination);
+ // netdata_log_error("STREAM %s [send to %s]: could not connect to parent node at this time.", rrdhost_hostname(host), host->rrdpush_send_destination);
return false;
}
@@ -720,7 +720,7 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
if(bytes <= 0) { // timeout is 0
worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT);
rrdpush_sender_thread_close_socket(host);
- error("STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", rrdhost_hostname(host), s->connected_to);
+ netdata_log_error("STREAM %s [send to %s]: failed to send HTTP header to remote netdata.", rrdhost_hostname(host), s->connected_to);
host->destination->reason = STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT;
host->destination->postpone_reconnection_until = now_realtime_sec() + 1 * 60;
return false;
@@ -739,17 +739,17 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
if(bytes <= 0) { // timeout is 0
worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT);
rrdpush_sender_thread_close_socket(host);
- error("STREAM %s [send to %s]: remote netdata does not respond.", rrdhost_hostname(host), s->connected_to);
+ netdata_log_error("STREAM %s [send to %s]: remote netdata does not respond.", rrdhost_hostname(host), s->connected_to);
host->destination->reason = STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT;
host->destination->postpone_reconnection_until = now_realtime_sec() + 30;
return false;
}
if(sock_setnonblock(s->rrdpush_sender_socket) < 0)
- error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", rrdhost_hostname(host), s->connected_to);
+ netdata_log_error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", rrdhost_hostname(host), s->connected_to);
if(sock_enlarge_out(s->rrdpush_sender_socket) < 0)
- error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", rrdhost_hostname(host), s->connected_to);
+ netdata_log_error("STREAM %s
[send to %s]: cannot enlarge the socket buffer.", rrdhost_hostname(host), s->connected_to); http[bytes] = '\0'; debug(D_STREAM, "Response to sender from far end: %s", http); @@ -846,7 +846,7 @@ static ssize_t attempt_to_send(struct sender_state *s) { else if (ret == -1) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SEND_ERROR); debug(D_STREAM, "STREAM: Send failed - closing socket..."); - error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", rrdhost_hostname(s->host), s->connected_to, s->sent_bytes_on_this_connection); + netdata_log_error("STREAM %s [send to %s]: failed to send metrics - closing connection - we have sent %zu bytes on this connection.", rrdhost_hostname(s->host), s->connected_to, s->sent_bytes_on_this_connection); rrdpush_sender_thread_close_socket(s->host); } else @@ -886,11 +886,11 @@ static ssize_t attempt_read(struct sender_state *s) { if (ret == 0 || errno == ECONNRESET) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_PARENT_CLOSED); - error("STREAM %s [send to %s]: connection closed by far end.", rrdhost_hostname(s->host), s->connected_to); + netdata_log_error("STREAM %s [send to %s]: connection closed by far end.", rrdhost_hostname(s->host), s->connected_to); } else { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_RECEIVE_ERROR); - error("STREAM %s [send to %s]: error during receive (%zd) - closing connection.", rrdhost_hostname(s->host), s->connected_to, ret); + netdata_log_error("STREAM %s [send to %s]: error during receive (%zd) - closing connection.", rrdhost_hostname(s->host), s->connected_to, ret); } rrdpush_sender_thread_close_socket(s->host); @@ -962,7 +962,7 @@ void execute_commands(struct sender_state *s) { char *function = get_word(words, num_words, 3); if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) { - error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.", + netdata_log_error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). 
Ignoring it.", rrdhost_hostname(s->host), s->connected_to, keyword, transaction?transaction:"(unset)", @@ -995,7 +995,7 @@ void execute_commands(struct sender_state *s) { const char *before = get_word(words, num_words, 4); if (!chart_id || !start_streaming || !after || !before) { - error("STREAM %s [send to %s] %s command is incomplete" + netdata_log_error("STREAM %s [send to %s] %s command is incomplete" " (chart=%s, start_streaming=%s, after=%s, before=%s)", rrdhost_hostname(s->host), s->connected_to, keyword, @@ -1013,7 +1013,7 @@ void execute_commands(struct sender_state *s) { } } else { - error("STREAM %s [send to %s] received unknown command over connection: %s", rrdhost_hostname(s->host), s->connected_to, words[0]?words[0]:"(unset)"); + netdata_log_error("STREAM %s [send to %s] received unknown command over connection: %s", rrdhost_hostname(s->host), s->connected_to, words[0]?words[0]:"(unset)"); } worker_is_busy(WORKER_SENDER_JOB_EXECUTE); @@ -1044,7 +1044,7 @@ static bool rrdpush_sender_pipe_close(RRDHOST *host, int *pipe_fds, bool reopen) int new_pipe_fds[2]; if(reopen) { if(pipe(new_pipe_fds) != 0) { - error("STREAM %s [send]: cannot create required pipe.", rrdhost_hostname(host)); + netdata_log_error("STREAM %s [send]: cannot create required pipe.", rrdhost_hostname(host)); new_pipe_fds[PIPE_READ] = -1; new_pipe_fds[PIPE_WRITE] = -1; ret = false; @@ -1084,7 +1084,7 @@ void rrdpush_signal_sender_to_wake_up(struct sender_state *s) { // signal the sender there are more data if (pipe_fd != -1 && write(pipe_fd, " ", 1) == -1) { - error("STREAM %s [send]: cannot write to internal pipe.", rrdhost_hostname(host)); + netdata_log_error("STREAM %s [send]: cannot write to internal pipe.", rrdhost_hostname(host)); rrdpush_sender_pipe_close(host, s->rrdpush_sender_pipe, true); } } @@ -1238,13 +1238,13 @@ void *rrdpush_sender_thread(void *ptr) { if(!rrdhost_has_rrdpush_sender_enabled(s->host) || !s->host->rrdpush_send_destination || !*s->host->rrdpush_send_destination || !s->host->rrdpush_send_api_key || !*s->host->rrdpush_send_api_key) { - error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.", + netdata_log_error("STREAM %s [send]: thread created (task id %d), but host has streaming disabled.", rrdhost_hostname(s->host), gettid()); return NULL; } if(!rrdhost_set_sender(s->host)) { - error("STREAM %s [send]: thread created (task id %d), but there is another sender running for this host.", + netdata_log_error("STREAM %s [send]: thread created (task id %d), but there is another sender running for this host.", rrdhost_hostname(s->host), gettid()); return NULL; } @@ -1282,7 +1282,7 @@ void *rrdpush_sender_thread(void *ptr) { pipe_buffer_size = 10 * 1024; if(!rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true)) { - error("STREAM %s [send]: cannot create inter-thread communication pipe. Disabling streaming.", + netdata_log_error("STREAM %s [send]: cannot create inter-thread communication pipe. 
Disabling streaming.", rrdhost_hostname(s->host)); return NULL; } @@ -1338,7 +1338,7 @@ void *rrdpush_sender_thread(void *ptr) { !rrdpush_sender_replicating_charts(s) )) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_TIMEOUT); - error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection via %zu send attempts.", rrdhost_hostname(s->host), s->connected_to, s->timeout, s->sent_bytes_on_this_connection, s->send_attempts); + netdata_log_error("STREAM %s [send to %s]: could not send metrics for %d seconds - closing connection - we have sent %zu bytes on this connection via %zu send attempts.", rrdhost_hostname(s->host), s->connected_to, s->timeout, s->sent_bytes_on_this_connection, s->send_attempts); rrdpush_sender_thread_close_socket(s->host); continue; } @@ -1359,7 +1359,7 @@ void *rrdpush_sender_thread(void *ptr) { if(unlikely(s->rrdpush_sender_pipe[PIPE_READ] == -1)) { if(!rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true)) { - error("STREAM %s [send]: cannot create inter-thread communication pipe. Disabling streaming.", + netdata_log_error("STREAM %s [send]: cannot create inter-thread communication pipe. Disabling streaming.", rrdhost_hostname(s->host)); rrdpush_sender_thread_close_socket(s->host); break; @@ -1411,7 +1411,7 @@ void *rrdpush_sender_thread(void *ptr) { // Only errors from poll() are internal, but try restarting the connection if(unlikely(poll_rc == -1)) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_POLL_ERROR); - error("STREAM %s [send to %s]: failed to poll(). Closing socket.", rrdhost_hostname(s->host), s->connected_to); + netdata_log_error("STREAM %s [send to %s]: failed to poll(). Closing socket.", rrdhost_hostname(s->host), s->connected_to); rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true); rrdpush_sender_thread_close_socket(s->host); continue; @@ -1433,7 +1433,7 @@ void *rrdpush_sender_thread(void *ptr) { debug(D_STREAM, "STREAM: Data added to send buffer (current buffer chunk %zu bytes)...", outstanding); if (read(fds[Collector].fd, thread_data->pipe_buffer, pipe_buffer_size) == -1) - error("STREAM %s [send to %s]: cannot read from internal pipe.", rrdhost_hostname(s->host), s->connected_to); + netdata_log_error("STREAM %s [send to %s]: cannot read from internal pipe.", rrdhost_hostname(s->host), s->connected_to); } // Read as much as possible to fill the buffer, split into full lines for execution. @@ -1461,7 +1461,7 @@ void *rrdpush_sender_thread(void *ptr) { if(error) { rrdpush_sender_pipe_close(s->host, s->rrdpush_sender_pipe, true); - error("STREAM %s [send to %s]: restarting internal pipe: %s.", + netdata_log_error("STREAM %s [send to %s]: restarting internal pipe: %s.", rrdhost_hostname(s->host), s->connected_to, error); } } @@ -1478,7 +1478,7 @@ void *rrdpush_sender_thread(void *ptr) { if(unlikely(error)) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SOCKET_ERROR); - error("STREAM %s [send to %s]: restarting connection: %s - %zu bytes transmitted.", + netdata_log_error("STREAM %s [send to %s]: restarting connection: %s - %zu bytes transmitted.", rrdhost_hostname(s->host), s->connected_to, error, s->sent_bytes_on_this_connection); rrdpush_sender_thread_close_socket(s->host); } @@ -1488,7 +1488,7 @@ void *rrdpush_sender_thread(void *ptr) { if(unlikely(s->flags & SENDER_FLAG_OVERFLOW)) { worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_OVERFLOW); errno = 0; - error("STREAM %s [send to %s]: buffer full (allocated %zu bytes) after sending %zu bytes. 
Restarting connection", + netdata_log_error("STREAM %s [send to %s]: buffer full (allocated %zu bytes) after sending %zu bytes. Restarting connection", rrdhost_hostname(s->host), s->connected_to, s->buffer->size, s->sent_bytes_on_this_connection); rrdpush_sender_thread_close_socket(s->host); } diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c index 214ca94178..069294bfca 100644 --- a/tests/profile/benchmark-procfile-parser.c +++ b/tests/profile/benchmark-procfile-parser.c @@ -207,7 +207,7 @@ procfile *procfile_readall1(procfile *ff) { debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s)); r = read(ff->fd, &ff->data[s], ff->size - s); if(unlikely(r == -1)) { - if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); + if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) netdata_log_error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd); procfile_close(ff); return NULL; } @@ -217,7 +217,7 @@ procfile *procfile_readall1(procfile *ff) { // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename); if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) { - if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); + if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) netdata_log_error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff)); procfile_close(ff); return NULL; } diff --git a/tests/profile/test-eval.c b/tests/profile/test-eval.c index 3c463166db..17836f6e9b 100644 --- a/tests/profile/test-eval.c +++ b/tests/profile/test-eval.c @@ -231,7 +231,7 @@ NETDATA_DOUBLE evaluate(EVAL_NODE *op, int depth) { break; default: - error("I don't know how to handle operator '%c'", op->operator); + netdata_log_error("I don't know how to handle operator '%c'", op->operator); r = 0; break; } diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c index 752f84949a..d81ddb34ee 100644 --- a/web/api/formatters/csv/csv.c +++ b/web/api/formatters/csv/csv.c @@ -79,7 +79,8 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const else { // generate the local date time struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); - if(!tm) { error("localtime() failed."); continue; } + if(!tm) { + netdata_log_error("localtime() failed."); continue; } buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); } diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c index cff56484f3..7e3f400e99 100644 --- a/web/api/formatters/json/json.c +++ b/web/api/formatters/json/json.c @@ -159,7 +159,8 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) { if(dates == JSON_DATES_JS) { // generate the local date time struct tm tmbuf, *tm = localtime_r(&now, &tmbuf); - if(!tm) { error("localtime_r() failed."); continue; } + if(!tm) { + netdata_log_error("localtime_r() failed."); continue; } if(likely(i != start)) buffer_fast_strcat(wb, ",\n", 2); buffer_fast_strcat(wb, pre_date, pre_date_len); diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c index 0cc3e0bc7b..f54cf959cf 100644 --- a/web/api/health/health_cmdapi.c +++ b/web/api/health/health_cmdapi.c @@ -101,7 +101,7 @@ void health_silencers2file(BUFFER *wb) { fclose(fd); return; } - 
error("Silencer changes could not be written to %s. Error %s", silencers_filename, strerror(errno)); + netdata_log_error("Silencer changes could not be written to %s. Error %s", silencers_filename, strerror(errno)); } /** diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c index fdb793bdc6..e569ce134c 100644 --- a/web/api/queries/weights.c +++ b/web/api/queries/weights.c @@ -1244,7 +1244,7 @@ static double kstwo( return NAN; if(unlikely(base_size != baseline_points - 1 || high_size != highlight_points - 1)) { - error("Metric correlations: internal error - calculate_pairs_diff() returns the wrong number of entries"); + netdata_log_error("Metric correlations: internal error - calculate_pairs_diff() returns the wrong number of entries"); return NAN; } @@ -1292,7 +1292,7 @@ NETDATA_DOUBLE *rrd2rrdr_ks2( stats->db_points_per_tier[tr] += r->internal.qt->db.tiers[tr].points; if(r->d != 1 || r->internal.qt->query.used != 1) { - error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu r->d and %zu qt->query.used", + netdata_log_error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu r->d and %zu qt->query.used", r->internal.qt->id, r->d, (size_t)r->internal.qt->query.used); goto cleanup; } @@ -1368,11 +1368,11 @@ static void rrdset_metric_correlations_ks2( // these conditions should never happen, but still let's check if(unlikely(prob < 0.0)) { - error("Metric correlations: kstwo() returned a negative number: %f", prob); + netdata_log_error("Metric correlations: kstwo() returned a negative number: %f", prob); prob = -prob; } if(unlikely(prob > 1.0)) { - error("Metric correlations: kstwo() returned a number above 1.0: %f", prob); + netdata_log_error("Metric correlations: kstwo() returned a number above 1.0: %f", prob); prob = 1.0; } diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c index 03e83c8557..6fb493b792 100644 --- a/web/api/web_api_v1.c +++ b/web/api/web_api_v1.c @@ -161,11 +161,11 @@ char *get_mgmt_api_key(void) { if(fd != -1) { char buf[GUID_LEN + 1]; if(read(fd, buf, GUID_LEN) != GUID_LEN) - error("Failed to read management API key from '%s'", api_key_filename); + netdata_log_error("Failed to read management API key from '%s'", api_key_filename); else { buf[GUID_LEN] = '\0'; if(regenerate_guid(buf, guid) == -1) { - error("Failed to validate management API key '%s' from '%s'.", + netdata_log_error("Failed to validate management API key '%s' from '%s'.", buf, api_key_filename); guid[0] = '\0'; @@ -185,12 +185,12 @@ char *get_mgmt_api_key(void) { // save it fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC, 444); if(fd == -1) { - error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename); + netdata_log_error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename); goto temp_key; } if(write(fd, guid, GUID_LEN) != GUID_LEN) { - error("Cannot write the unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename); + netdata_log_error("Cannot write the unique management API key file '%s'. 
Please adjust config parameter 'netdata management api key file' to a proper path and file with enough space left.", api_key_filename); close(fd); goto temp_key; } @@ -973,7 +973,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * else if(vhash == hash_search && !strcmp(value, "search")) action = 'S'; else if(vhash == hash_switch && !strcmp(value, "switch")) action = 'W'; #ifdef NETDATA_INTERNAL_CHECKS - else error("unknown registry action '%s'", value); + else netdata_log_error("unknown registry action '%s'", value); #endif /* NETDATA_INTERNAL_CHECKS */ } /* @@ -1003,7 +1003,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * to_person_guid = value; } #ifdef NETDATA_INTERNAL_CHECKS - else error("unused registry URL parameter '%s' with value '%s'", name, value); + else netdata_log_error("unused registry URL parameter '%s' with value '%s'", name, value); #endif /* NETDATA_INTERNAL_CHECKS */ } @@ -1028,7 +1028,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * switch(action) { case 'A': if(unlikely(!machine_guid || !machine_url || !url_name)) { - error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET"); + netdata_log_error("Invalid registry request - access requires these parameters: machine ('%s'), url ('%s'), name ('%s')", machine_guid ? machine_guid : "UNSET", machine_url ? machine_url : "UNSET", url_name ? url_name : "UNSET"); buffer_flush(w->response.data); buffer_strcat(w->response.data, "Invalid registry Access request."); return HTTP_RESP_BAD_REQUEST; @@ -1039,7 +1039,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * case 'D': if(unlikely(!machine_guid || !machine_url || !delete_url)) { - error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); + netdata_log_error("Invalid registry request - delete requires these parameters: machine ('%s'), url ('%s'), delete_url ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", delete_url?delete_url:"UNSET"); buffer_flush(w->response.data); buffer_strcat(w->response.data, "Invalid registry Delete request."); return HTTP_RESP_BAD_REQUEST; @@ -1050,7 +1050,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * case 'S': if(unlikely(!machine_guid || !machine_url || !search_machine_guid)) { - error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET"); + netdata_log_error("Invalid registry request - search requires these parameters: machine ('%s'), url ('%s'), for ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", search_machine_guid?search_machine_guid:"UNSET"); buffer_flush(w->response.data); buffer_strcat(w->response.data, "Invalid registry Search request."); return HTTP_RESP_BAD_REQUEST; @@ -1061,7 +1061,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client * case 'W': if(unlikely(!machine_guid || !machine_url || !to_person_guid)) { - error("Invalid registry request - switching identity requires these parameters: machine 
('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); + netdata_log_error("Invalid registry request - switching identity requires these parameters: machine ('%s'), url ('%s'), to ('%s')", machine_guid?machine_guid:"UNSET", machine_url?machine_url:"UNSET", to_person_guid?to_person_guid:"UNSET"); buffer_flush(w->response.data); buffer_strcat(w->response.data, "Invalid registry Switch request."); return HTTP_RESP_BAD_REQUEST; diff --git a/web/rtc/webrtc.c b/web/rtc/webrtc.c index 66e151ecbc..2a26bc4285 100644 --- a/web/rtc/webrtc.c +++ b/web/rtc/webrtc.c @@ -21,7 +21,7 @@ static void webrtc_log(rtcLogLevel level, const char *message) { case RTC_LOG_WARNING: case RTC_LOG_ERROR: case RTC_LOG_FATAL: - error("WEBRTC: %s", message); + netdata_log_error("WEBRTC: %s", message); break; case RTC_LOG_INFO: @@ -263,7 +263,7 @@ static size_t webrtc_send_in_chunks(WEBRTC_DC *chan, const char *data, size_t si total_message_size = -total_message_size; if(rtcSendMessage(chan->dc, send_buffer, total_message_size) != RTC_ERR_SUCCESS) - error("WEBRTC[%d],DC[%d]: failed to send LZ4 chunk %zu of %zu", chan->conn->pc, chan->dc, chunk, total_chunks); + netdata_log_error("WEBRTC[%d],DC[%d]: failed to send LZ4 chunk %zu of %zu", chan->conn->pc, chan->dc, chunk, total_chunks); else internal_error(true, "WEBRTC[%d],DC[%d]: sent chunk %zu of %zu, size %zu (total %d)", chan->conn->pc, chan->dc, chunk, total_chunks, message_size, total_message_size); @@ -403,7 +403,7 @@ static void myErrorCallback(int id __maybe_unused, const char *error, void *user WEBRTC_DC *chan = user_ptr; internal_fatal(chan->dc != id, "WEBRTC[%d],DC[%d]: dc mismatch, expected %d, got %d", chan->conn->pc, chan->dc, chan->dc, id); - error("WEBRTC[%d],DC[%d]: ERROR: '%s'", chan->conn->pc, chan->dc, error); + netdata_log_error("WEBRTC[%d],DC[%d]: ERROR: '%s'", chan->conn->pc, chan->dc, error); } static void myMessageCallback(int id __maybe_unused, const char *message, int size, void *user_ptr) { @@ -464,19 +464,19 @@ static void myDataChannelCallback(int pc __maybe_unused, int dc, void *user_ptr) chan->label = strdupz(label); if(rtcSetOpenCallback(dc, myOpenCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d],DC[%d]: rtcSetOpenCallback() failed.", conn->pc, chan->dc); + netdata_log_error("WEBRTC[%d],DC[%d]: rtcSetOpenCallback() failed.", conn->pc, chan->dc); if(rtcSetClosedCallback(dc, myClosedCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d],DC[%d]: rtcSetClosedCallback() failed.", conn->pc, chan->dc); + netdata_log_error("WEBRTC[%d],DC[%d]: rtcSetClosedCallback() failed.", conn->pc, chan->dc); if(rtcSetErrorCallback(dc, myErrorCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d],DC[%d]: rtcSetErrorCallback() failed.", conn->pc, chan->dc); + netdata_log_error("WEBRTC[%d],DC[%d]: rtcSetErrorCallback() failed.", conn->pc, chan->dc); if(rtcSetMessageCallback(dc, myMessageCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d],DC[%d]: rtcSetMessageCallback() failed.", conn->pc, chan->dc); + netdata_log_error("WEBRTC[%d],DC[%d]: rtcSetMessageCallback() failed.", conn->pc, chan->dc); // if(rtcSetAvailableCallback(dc, myAvailableCallback) != RTC_ERR_SUCCESS) -// error("WEBRTC[%d],DC[%d]: rtcSetAvailableCallback() failed.", conn->pc, chan->dc); +// netdata_log_error("WEBRTC[%d],DC[%d]: rtcSetAvailableCallback() failed.", conn->pc, chan->dc); internal_error(true, "WEBRTC[%d],DC[%d]: new data channel with label '%s'", chan->conn->pc, chan->dc, chan->label); } @@ -671,29 +671,29 @@ int 
webrtc_new_connection(const char *sdp, BUFFER *wb) { rtcSetUserPointer(conn->pc, conn); if(rtcSetLocalDescriptionCallback(conn->pc, myDescriptionCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetLocalDescriptionCallback() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetLocalDescriptionCallback() failed", conn->pc); if(rtcSetLocalCandidateCallback(conn->pc, myCandidateCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetLocalCandidateCallback() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetLocalCandidateCallback() failed", conn->pc); if(rtcSetStateChangeCallback(conn->pc, myStateChangeCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetStateChangeCallback() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetStateChangeCallback() failed", conn->pc); if(rtcSetGatheringStateChangeCallback(conn->pc, myGatheringStateCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetGatheringStateChangeCallback() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetGatheringStateChangeCallback() failed", conn->pc); if(rtcSetDataChannelCallback(conn->pc, myDataChannelCallback) != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetDataChannelCallback() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetDataChannelCallback() failed", conn->pc); // initialize the handshake internal_error(true, "WEBRTC[%d]: setting remote sdp: %s", conn->pc, sdp); if(rtcSetRemoteDescription(conn->pc, sdp, "offer") != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetRemoteDescription() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetRemoteDescription() failed", conn->pc); // initiate the handshake process if(conn->config.disableAutoNegotiation) { if(rtcSetLocalDescription(conn->pc, NULL) != RTC_ERR_SUCCESS) - error("WEBRTC[%d]: rtcSetLocalDescription() failed", conn->pc); + netdata_log_error("WEBRTC[%d]: rtcSetLocalDescription() failed", conn->pc); } bool logged = false; diff --git a/web/server/h2o/http_server.c b/web/server/h2o/http_server.c index be57d9be0c..341b5f6289 100644 --- a/web/server/h2o/http_server.c +++ b/web/server/h2o/http_server.c @@ -78,11 +78,11 @@ static int ssl_init() /* load certificate and private key */ if (SSL_CTX_use_PrivateKey_file(accept_ctx.ssl_ctx, key_fn, SSL_FILETYPE_PEM) != 1) { - error("Could not load server key from \"%s\"", key_fn); + netdata_log_error("Could not load server key from \"%s\"", key_fn); return -1; } if (SSL_CTX_use_certificate_file(accept_ctx.ssl_ctx, cert_fn, SSL_FILETYPE_PEM) != 1) { - error("Could not load certificate from \"%s\"", cert_fn); + netdata_log_error("Could not load certificate from \"%s\"", cert_fn); return -1; } @@ -318,14 +318,14 @@ void *h2o_main(void *ptr) { accept_ctx.hosts = config.hosts; if (create_listener(bind_addr, bind_port) != 0) { - error("failed to create listener %s:%d", bind_addr, bind_port); + netdata_log_error("failed to create listener %s:%d", bind_addr, bind_port); return NULL; } while (service_running(SERVICE_HTTPD)) { int rc = h2o_evloop_run(ctx.loop, POLL_INTERVAL); if (rc < 0 && errno != EINTR) { - error("h2o_evloop_run returned (%d) with errno other than EINTR. Aborting", rc); + netdata_log_error("h2o_evloop_run returned (%d) with errno other than EINTR. 
Aborting", rc); break; } } diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c index 9295db6158..657230e8e7 100644 --- a/web/server/static/static-threaded.c +++ b/web/server/static/static-threaded.c @@ -180,7 +180,7 @@ static int web_server_file_write_callback(POLLINFO *pi, short int *events) { (void)events; worker_is_busy(WORKER_JOB_WRITE_FILE); - error("Writing to web files is not supported!"); + netdata_log_error("Writing to web files is not supported!"); worker_is_idle(); return -1; @@ -325,7 +325,7 @@ static int web_server_rcv_callback(POLLINFO *pi, short int *events) { if(fpi) w->pollinfo_filecopy_slot = fpi->slot; else { - error("Failed to add filecopy fd. Closing client."); + netdata_log_error("Failed to add filecopy fd. Closing client."); ret = -1; goto cleanup; } @@ -483,7 +483,7 @@ static void socket_listen_main_static_threaded_cleanup(void *ptr) { // } // // if(found) -// error("%d static web threads are taking too long to finish. Giving up.", found); +// netdata_log_error("%d static web threads are taking too long to finish. Giving up.", found); netdata_log_info("closing all web server sockets..."); listen_sockets_close(&api_sockets); diff --git a/web/server/web_client.c b/web/server/web_client.c index cdf088fede..982bbd0164 100644 --- a/web/server/web_client.c +++ b/web/server/web_client.c @@ -31,7 +31,7 @@ static inline int web_client_cork_socket(struct web_client *w __maybe_unused) { if(likely(web_client_is_corkable(w) && !w->tcp_cork && w->ofd != -1)) { w->tcp_cork = true; if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) { - error("%llu: failed to enable TCP_CORK on socket.", w->id); + netdata_log_error("%llu: failed to enable TCP_CORK on socket.", w->id); w->tcp_cork = false; return -1; @@ -58,7 +58,7 @@ static inline int web_client_uncork_socket(struct web_client *w __maybe_unused) if(likely(w->tcp_cork && w->ofd != -1)) { w->tcp_cork = false; if(unlikely(setsockopt(w->ofd, IPPROTO_TCP, TCP_CORK, (char *) &w->tcp_cork, sizeof(int)) != 0)) { - error("%llu: failed to disable TCP_CORK on socket.", w->id); + netdata_log_error("%llu: failed to disable TCP_CORK on socket.", w->id); w->tcp_cork = true; return -1; } @@ -521,7 +521,7 @@ static int mysendfile(struct web_client *w, char *filename) { w->ifd = w->ofd; if(errno == EBUSY || errno == EAGAIN) { - error("%llu: File '%s' is busy, sending 307 Moved Temporarily to force retry.", w->id, web_filename); + netdata_log_error("%llu: File '%s' is busy, sending 307 Moved Temporarily to force retry.", w->id, web_filename); w->response.data->content_type = CT_TEXT_HTML; buffer_sprintf(w->response.header, "Location: /%s\r\n", filename); buffer_strcat(w->response.data, "File is currently busy, please try again later: "); @@ -529,7 +529,7 @@ static int mysendfile(struct web_client *w, char *filename) { return HTTP_RESP_REDIR_TEMP; } else { - error("%llu: Cannot open file '%s'.", w->id, web_filename); + netdata_log_error("%llu: Cannot open file '%s'.", w->id, web_filename); w->response.data->content_type = CT_TEXT_HTML; buffer_strcat(w->response.data, "Cannot open file: "); buffer_strcat_htmlescape(w->response.data, web_filename); @@ -566,7 +566,7 @@ void web_client_enable_deflate(struct web_client *w, int gzip) { } if(unlikely(w->response.sent)) { - error("%llu: Cannot enable compression in the middle of a conversation.", w->id); + netdata_log_error("%llu: Cannot enable compression in the middle of a conversation.", w->id); return; } @@ -587,13 +587,13 @@ 
void web_client_enable_deflate(struct web_client *w, int gzip) {
w->response.zstream.opaque = Z_NULL;
// if(deflateInit(&w->response.zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
-// error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+// netdata_log_error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
// return;
// }
// Select GZIP compression: windowbits = 15 + 16 = 31
if(deflateInit2(&w->response.zstream, web_gzip_level, Z_DEFLATED, 15 + ((gzip)?16:0), 8, web_gzip_strategy) != Z_OK) {
- error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
+ netdata_log_error("%llu: Failed to initialize zlib. Proceeding without compression.", w->id);
return;
}
@@ -977,7 +977,7 @@ static inline char *web_client_valid_method(struct web_client *w, char *s) {
memcpy(hostname,"not available",13);
hostname[13] = 0x00;
}
- error("The server is configured to always use encrypted connections, please enable the SSL on child with hostname '%s'.",hostname);
+ netdata_log_error("The server is configured to always use encrypted connections, please enable the SSL on child with hostname '%s'.",hostname);
s = NULL;
}
#endif
@@ -1281,7 +1281,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
count++;
if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
- error("Cannot send HTTP headers to web client.");
+ netdata_log_error("Cannot send HTTP headers to web client.");
break;
}
}
@@ -1292,7 +1292,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
count++;
if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
- error("Cannot send HTTP headers to web client.");
+ netdata_log_error("Cannot send HTTP headers to web client.");
break;
}
}
@@ -1302,7 +1302,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
count++;
if(count > 100 || (errno != EAGAIN && errno != EWOULDBLOCK)) {
- error("Cannot send HTTP headers to web client.");
+ netdata_log_error("Cannot send HTTP headers to web client.");
break;
}
}
@@ -1313,8 +1313,7 @@ static inline void web_client_send_http_header(struct web_client *w) {
w->statistics.sent_bytes += bytes;
if (bytes < 0) {
-
- error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
+ netdata_log_error("HTTP headers failed to be sent (I sent %zu bytes but the system sent %zd bytes). Closing web client."
, buffer_strlen(w->response.header_output)
, bytes);
@@ -1519,7 +1518,7 @@ static inline int web_client_process_url(RRDHOST *host, struct web_client *w, ch
else
buffer_strcat(w->response.data, "I am doing it already");
- error("web request to exit received.");
+ netdata_log_error("web request to exit received.");
netdata_cleanup_and_exit(0);
return HTTP_RESP_OK;
}
@@ -1773,7 +1772,7 @@ void web_client_process_request(struct web_client *w) {
{
long len = sendfile(w->ofd, w->ifd, NULL, w->response.data->rbytes);
if(len != w->response.data->rbytes)
- error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len);
+ netdata_log_error("%llu: sendfile() should copy %ld bytes, but copied %ld. Falling back to manual copy.", w->id, w->response.data->rbytes, len);
else
web_client_request_done(w);
}
@@ -1932,7 +1931,7 @@ ssize_t web_client_send_deflate(struct web_client *w)
// compress
if(deflate(&w->response.zstream, flush) == Z_STREAM_ERROR) {
- error("%llu: Compression failed. Closing down client.", w->id);
+ netdata_log_error("%llu: Compression failed. Closing down client.", w->id);
web_client_request_done(w);
return(-1);
}