// SPDX-License-Identifier: GPL-3.0-or-later

/** @file logsmanag_config.c
 *  @brief This file includes functions to manage
 *         the logs management configuration.
 */

#include "logsmanag_config.h"
#include "db_api.h"
#include "rrd_api/rrd_api.h"
#include "helper.h"

g_logs_manag_config_t g_logs_manag_config = {
    .update_every = UPDATE_EVERY,
    .update_timeout = UPDATE_TIMEOUT_DEFAULT,
    .use_log_timestamp = CONFIG_BOOLEAN_AUTO,
    .circ_buff_max_size_in_mib = CIRCULAR_BUFF_DEFAULT_MAX_SIZE / (1 MiB),
    .circ_buff_drop_logs = CIRCULAR_BUFF_DEFAULT_DROP_LOGS,
    .compression_acceleration = COMPRESSION_ACCELERATION_DEFAULT,
    .db_mode = GLOBAL_DB_MODE_DEFAULT,
    .disk_space_limit_in_mib = DISK_SPACE_LIMIT_DEFAULT,
    .buff_flush_to_db_interval = SAVE_BLOB_TO_DB_DEFAULT,
    .enable_collected_logs_total = ENABLE_COLLECTED_LOGS_TOTAL_DEFAULT,
    .enable_collected_logs_rate = ENABLE_COLLECTED_LOGS_RATE_DEFAULT
};
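
/* The defaults above are the fallback values for the [global] section of
 * logsmanagement.d.conf, which logs_manag_config_load() reads further down in
 * this file. As an illustration only (the values shown here are hypothetical;
 * the real stock defaults come from the macros above), that section could
 * look like this:
 *
 *     [global]
 *         update every = 1
 *         update timeout = 10
 *         use log timestamp = auto
 *         circular buffer max size MiB = 64
 *         circular buffer drop logs if full = no
 *         compression acceleration = 1
 *         collected logs total chart enable = no
 *         collected logs rate chart enable = yes
 */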

static logs_manag_db_mode_t db_mode_str_to_db_mode(const char *const db_mode_str){
    if(!db_mode_str || !*db_mode_str) return g_logs_manag_config.db_mode;
    else if(!strcasecmp(db_mode_str, "full")) return LOGS_MANAG_DB_MODE_FULL;
    else if(!strcasecmp(db_mode_str, "none")) return LOGS_MANAG_DB_MODE_NONE;
    else return g_logs_manag_config.db_mode;
}

static struct config log_management_config = {
    .first_section = NULL,
    .last_section = NULL,
    .mutex = NETDATA_MUTEX_INITIALIZER,
    .index = {
        .avl_tree = {
            .root = NULL,
            .compar = appconfig_section_compare
        },
        .rwlock = AVL_LOCK_INITIALIZER
    }
};

static struct Chart_meta chart_types[] = {
    {.type = FLB_TAIL,      .init = generic_chart_init,   .update = generic_chart_update},
    {.type = FLB_WEB_LOG,   .init = web_log_chart_init,   .update = web_log_chart_update},
    {.type = FLB_KMSG,      .init = kernel_chart_init,    .update = kernel_chart_update},
    {.type = FLB_SYSTEMD,   .init = systemd_chart_init,   .update = systemd_chart_update},
    {.type = FLB_DOCKER_EV, .init = docker_ev_chart_init, .update = docker_ev_chart_update},
    {.type = FLB_SYSLOG,    .init = generic_chart_init,   .update = generic_chart_update},
    {.type = FLB_SERIAL,    .init = generic_chart_init,   .update = generic_chart_update},
    {.type = FLB_MQTT,      .init = mqtt_chart_init,      .update = mqtt_chart_update}
};

char *get_user_config_dir(void){
    char *dir = getenv("NETDATA_USER_CONFIG_DIR");

    return dir ? dir : CONFIG_DIR;
}

char *get_stock_config_dir(void){
    char *dir = getenv("NETDATA_STOCK_CONFIG_DIR");

    return dir ? dir : LIBCONFIG_DIR;
}

char *get_log_dir(void){
    char *dir = getenv("NETDATA_LOG_DIR");

    return dir ? dir : LOG_DIR;
}

char *get_cache_dir(void){
    char *dir = getenv("NETDATA_CACHE_DIR");

    return dir ? dir : CACHE_DIR;
}
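
/* A minimal sketch (illustrative only, not part of the plugin) of how the
 * directory helpers above behave: each one prefers the environment variable
 * exported by the agent and falls back to its compile-time default otherwise.
 * The override path below is hypothetical. */
#if 0
static void example_config_dir_lookup(void){
    setenv("NETDATA_USER_CONFIG_DIR", "/tmp/netdata-test/etc", 1);  /* hypothetical override */
    collector_info("user config dir: %s", get_user_config_dir());   /* logs the override */

    unsetenv("NETDATA_USER_CONFIG_DIR");
    collector_info("user config dir: %s", get_user_config_dir());   /* logs CONFIG_DIR */
}
#endif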

/**
 * @brief Cleanup p_file_info struct
 * @param p_file_info The struct of File_info type to be cleaned up.
 * @todo Pass p_file_info by reference, so that it can be set to NULL.
 */
static void p_file_info_destroy(void *arg){
    struct File_info *p_file_info = (struct File_info *) arg;

    // TODO: Clean up rrd / chart stuff.
    // p_file_info->chart_meta

    if(unlikely(!p_file_info)){
        collector_info("p_file_info_destroy() called but p_file_info == NULL - already destroyed?");
        return;
    }

    char chartname[100];
    snprintfz(chartname, 100, "%s", p_file_info->chartname ? p_file_info->chartname : "Unknown");
    collector_info("[%s]: p_file_info_destroy() cleanup...", chartname);

    __atomic_store_n(&p_file_info->state, LOG_SRC_EXITING, __ATOMIC_RELAXED);

    if(uv_is_active((uv_handle_t *) &p_file_info->flb_tmp_buff_cpy_timer)){
        uv_timer_stop(&p_file_info->flb_tmp_buff_cpy_timer);
        if (!uv_is_closing((uv_handle_t *) &p_file_info->flb_tmp_buff_cpy_timer))
            uv_close((uv_handle_t *) &p_file_info->flb_tmp_buff_cpy_timer, NULL);
    }

    // TODO: Need to do proper termination of DB threads and allocated memory.
    if(p_file_info->db_writer_thread){
        uv_thread_join(p_file_info->db_writer_thread);
        sqlite3_finalize(p_file_info->stmt_get_log_msg_metadata_asc);
        sqlite3_finalize(p_file_info->stmt_get_log_msg_metadata_desc);
        if(sqlite3_close(p_file_info->db) != SQLITE_OK)
            collector_error("[%s]: Failed to close database", chartname);
        freez(p_file_info->db_mut);
        freez((void *) p_file_info->db_metadata);
        freez((void *) p_file_info->db_dir);
        freez(p_file_info->db_writer_thread);
    }

    freez((void *) p_file_info->chartname);
    freez(p_file_info->filename);
    freez((void *) p_file_info->file_basename);
    freez((void *) p_file_info->stream_guid);

    for(int i = 1; i <= BLOB_MAX_FILES; i++){
        if(p_file_info->blob_handles[i]){
            uv_fs_close(NULL, NULL, p_file_info->blob_handles[i], NULL);
            p_file_info->blob_handles[i] = 0;
        }
    }

    if(p_file_info->circ_buff)
        circ_buff_destroy(p_file_info->circ_buff);

    if(p_file_info->parser_metrics){
        switch(p_file_info->log_type){
            case FLB_WEB_LOG: {
                if(p_file_info->parser_metrics->web_log)
                    freez(p_file_info->parser_metrics->web_log);
                break;
            }
            case FLB_KMSG: {
                if(p_file_info->parser_metrics->kernel){
                    dictionary_destroy(p_file_info->parser_metrics->kernel->subsystem);
                    dictionary_destroy(p_file_info->parser_metrics->kernel->device);
                    freez(p_file_info->parser_metrics->kernel);
                }
                break;
            }
            case FLB_SYSTEMD:
            case FLB_SYSLOG: {
                if(p_file_info->parser_metrics->systemd)
                    freez(p_file_info->parser_metrics->systemd);
                break;
            }
            case FLB_DOCKER_EV: {
                if(p_file_info->parser_metrics->docker_ev)
                    freez(p_file_info->parser_metrics->docker_ev);
                break;
            }
            case FLB_MQTT: {
                if(p_file_info->parser_metrics->mqtt){
                    dictionary_destroy(p_file_info->parser_metrics->mqtt->topic);
                    freez(p_file_info->parser_metrics->mqtt);
                }
                break;
            }
            default:
                break;
        }

        for(int i = 0; p_file_info->parser_cus_config &&
                       p_file_info->parser_metrics->parser_cus &&
                       p_file_info->parser_cus_config[i]; i++){
            freez(p_file_info->parser_cus_config[i]->chartname);
            freez(p_file_info->parser_cus_config[i]->regex_str);
            freez(p_file_info->parser_cus_config[i]->regex_name);
            regfree(&p_file_info->parser_cus_config[i]->regex);
            freez(p_file_info->parser_cus_config[i]);
            freez(p_file_info->parser_metrics->parser_cus[i]);
        }

        freez(p_file_info->parser_cus_config);
        freez(p_file_info->parser_metrics->parser_cus);

        freez(p_file_info->parser_metrics);
    }

    if(p_file_info->parser_config){
        freez(p_file_info->parser_config->gen_config);
        freez(p_file_info->parser_config);
    }

    Flb_output_config_t *output_next = p_file_info->flb_outputs;
    while(output_next){
        Flb_output_config_t *output = output_next;
        output_next = output_next->next;

        struct flb_output_config_param *param_next = output->param;
        while(param_next){
            struct flb_output_config_param *param = param_next;
            param_next = param->next;
            freez(param->key);
            freez(param->val);
            freez(param);
        }
        freez(output->plugin);
        freez(output);
    }

    freez(p_file_info->flb_config);

    freez(p_file_info);

    collector_info("[%s]: p_file_info_destroy() cleanup done", chartname);
}

void p_file_info_destroy_all(void){
    if(p_file_infos_arr){
        uv_thread_t thread_id[p_file_infos_arr->count];
        for(int i = 0; i < p_file_infos_arr->count; i++){
            fatal_assert(0 == uv_thread_create(&thread_id[i], p_file_info_destroy, p_file_infos_arr->data[i]));
        }
        for(int i = 0; i < p_file_infos_arr->count; i++){
            uv_thread_join(&thread_id[i]);
        }
        freez(p_file_infos_arr);
        p_file_infos_arr = NULL;
    }
}

/**
 * @brief Load logs management configuration.
 * @returns 0 if successful,
 *          -1 if the configuration file cannot be found,
 *          -2 if p_flb_srvc_config is NULL (no flb_srvc_config_t provided).
 */
int logs_manag_config_load( flb_srvc_config_t *p_flb_srvc_config,
                            Flb_socket_config_t **forward_in_config_p,
                            int g_update_every){
    int rc = LOGS_MANAG_CONFIG_LOAD_ERROR_OK;
    char section[100];
    char temp_path[FILENAME_MAX + 1];

    struct config logsmanagement_d_conf = {
        .first_section = NULL,
        .last_section = NULL,
        .mutex = NETDATA_MUTEX_INITIALIZER,
        .index = {
            .avl_tree = {
                .root = NULL,
                .compar = appconfig_section_compare
            },
            .rwlock = AVL_LOCK_INITIALIZER
        }
    };

    char *filename = strdupz_path_subpath(get_user_config_dir(), "logsmanagement.d.conf");
    if(!appconfig_load(&logsmanagement_d_conf, filename, 0, NULL)) {
        collector_info("CONFIG: cannot load user config '%s'. Will try stock config.", filename);
        freez(filename);

        filename = strdupz_path_subpath(get_stock_config_dir(), "logsmanagement.d.conf");
        if(!appconfig_load(&logsmanagement_d_conf, filename, 0, NULL)){
            collector_error("CONFIG: cannot load stock config '%s'. Logs management will be disabled.", filename);
            rc = LOGS_MANAG_CONFIG_LOAD_ERROR_NO_STOCK_CONFIG;
        }
    }
    freez(filename);

    /* [global] section */

    snprintfz(section, 100, "global");

    g_logs_manag_config.update_every = appconfig_get_number(
        &logsmanagement_d_conf,
        section,
        "update every",
        g_logs_manag_config.update_every);

    g_logs_manag_config.update_every =
        g_update_every && g_update_every > g_logs_manag_config.update_every ?
        g_update_every : g_logs_manag_config.update_every;

    g_logs_manag_config.update_timeout = appconfig_get_number(
        &logsmanagement_d_conf,
        section,
        "update timeout",
        UPDATE_TIMEOUT_DEFAULT);

    if(g_logs_manag_config.update_timeout < g_logs_manag_config.update_every)
        g_logs_manag_config.update_timeout = g_logs_manag_config.update_every;

    g_logs_manag_config.use_log_timestamp = appconfig_get_boolean_ondemand(
        &logsmanagement_d_conf,
        section,
        "use log timestamp",
        g_logs_manag_config.use_log_timestamp);

    g_logs_manag_config.circ_buff_max_size_in_mib = appconfig_get_number(
        &logsmanagement_d_conf,
        section,
        "circular buffer max size MiB",
        g_logs_manag_config.circ_buff_max_size_in_mib);

    g_logs_manag_config.circ_buff_drop_logs = appconfig_get_boolean(
        &logsmanagement_d_conf,
        section,
        "circular buffer drop logs if full",
        g_logs_manag_config.circ_buff_drop_logs);

    g_logs_manag_config.compression_acceleration = appconfig_get_number(
        &logsmanagement_d_conf,
        section,
        "compression acceleration",
        g_logs_manag_config.compression_acceleration);

    g_logs_manag_config.enable_collected_logs_total = appconfig_get_boolean(
        &logsmanagement_d_conf,
        section,
        "collected logs total chart enable",
        g_logs_manag_config.enable_collected_logs_total);

    g_logs_manag_config.enable_collected_logs_rate = appconfig_get_boolean(
        &logsmanagement_d_conf,
        section,
        "collected logs rate chart enable",
        g_logs_manag_config.enable_collected_logs_rate);

    if(!rc){
        collector_info("CONFIG: [%s] update every: %d", section, g_logs_manag_config.update_every);
        collector_info("CONFIG: [%s] update timeout: %d", section, g_logs_manag_config.update_timeout);
        collector_info("CONFIG: [%s] use log timestamp: %d", section, g_logs_manag_config.use_log_timestamp);
        collector_info("CONFIG: [%s] circular buffer max size MiB: %d", section, g_logs_manag_config.circ_buff_max_size_in_mib);
        collector_info("CONFIG: [%s] circular buffer drop logs if full: %d", section, g_logs_manag_config.circ_buff_drop_logs);
        collector_info("CONFIG: [%s] compression acceleration: %d", section, g_logs_manag_config.compression_acceleration);
        collector_info("CONFIG: [%s] collected logs total chart enable: %d", section, g_logs_manag_config.enable_collected_logs_total);
        collector_info("CONFIG: [%s] collected logs rate chart enable: %d", section, g_logs_manag_config.enable_collected_logs_rate);
    }

    /* [db] section */

    snprintfz(section, 100, "db");

    const char *const db_mode_str = appconfig_get(
        &logsmanagement_d_conf,
        section,
        "db mode",
        GLOBAL_DB_MODE_DEFAULT_STR);
    g_logs_manag_config.db_mode = db_mode_str_to_db_mode(db_mode_str);

    snprintfz(temp_path, FILENAME_MAX, "%s" LOGS_MANAG_DB_SUBPATH, get_cache_dir());
    db_set_main_dir(appconfig_get(&logsmanagement_d_conf, section, "db dir", temp_path));

    g_logs_manag_config.buff_flush_to_db_interval = appconfig_get_number(
        &logsmanagement_d_conf,
        section,
        "circular buffer flush to db",
        g_logs_manag_config.buff_flush_to_db_interval);

    g_logs_manag_config.disk_space_limit_in_mib = appconfig_get_number(
        &logsmanagement_d_conf,
        section,
        "disk space limit MiB",
        g_logs_manag_config.disk_space_limit_in_mib);

    if(!rc){
        collector_info("CONFIG: [%s] db mode: %s [%d]", section, db_mode_str, (int) g_logs_manag_config.db_mode);
        collector_info("CONFIG: [%s] db dir: %s", section, temp_path);
        collector_info("CONFIG: [%s] circular buffer flush to db: %d", section, g_logs_manag_config.buff_flush_to_db_interval);
        collector_info("CONFIG: [%s] disk space limit MiB: %d", section, g_logs_manag_config.disk_space_limit_in_mib);
    }
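
    /* Illustrative [db] section of logsmanagement.d.conf, matching the keys read
     * above. The values shown are hypothetical; the stock defaults come from
     * GLOBAL_DB_MODE_DEFAULT_STR and the other macros referenced in this file,
     * and "db dir" defaults to the agent cache dir plus LOGS_MANAG_DB_SUBPATH:
     *
     *     [db]
     *         db mode = full
     *         db dir = /var/cache/netdata/logs_management_db
     *         circular buffer flush to db = 6
     *         disk space limit MiB = 500
     */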

    /* [forward input] section */

    snprintfz(section, 100, "forward input");

    const int fwd_enable = appconfig_get_boolean(
        &logsmanagement_d_conf,
        section,
        "enabled",
        CONFIG_BOOLEAN_NO);

    *forward_in_config_p = (Flb_socket_config_t *) callocz(1, sizeof(Flb_socket_config_t));

    (*forward_in_config_p)->unix_path = appconfig_get(
        &logsmanagement_d_conf,
        section,
        "unix path",
        FLB_FORWARD_UNIX_PATH_DEFAULT);

    (*forward_in_config_p)->unix_perm = appconfig_get(
        &logsmanagement_d_conf,
        section,
        "unix perm",
        FLB_FORWARD_UNIX_PERM_DEFAULT);

    // TODO: Check if listen is in valid format
    (*forward_in_config_p)->listen = appconfig_get(
        &logsmanagement_d_conf,
        section,
        "listen",
        FLB_FORWARD_ADDR_DEFAULT);

    (*forward_in_config_p)->port = appconfig_get(
        &logsmanagement_d_conf,
        section,
        "port",
        FLB_FORWARD_PORT_DEFAULT);

    if(!rc){
        collector_info("CONFIG: [%s] enabled: %s", section, fwd_enable ? "yes" : "no");
        collector_info("CONFIG: [%s] unix path: %s", section, (*forward_in_config_p)->unix_path);
        collector_info("CONFIG: [%s] unix perm: %s", section, (*forward_in_config_p)->unix_perm);
        collector_info("CONFIG: [%s] listen: %s", section, (*forward_in_config_p)->listen);
        collector_info("CONFIG: [%s] port: %s", section, (*forward_in_config_p)->port);
    }

    if(!fwd_enable) {
        freez(*forward_in_config_p);
        *forward_in_config_p = NULL;
    }

    /* [fluent bit] section */

    snprintfz(section, 100, "fluent bit");

    snprintfz(temp_path, FILENAME_MAX, "%s/%s", get_log_dir(), FLB_LOG_FILENAME_DEFAULT);

    if(p_flb_srvc_config){
        p_flb_srvc_config->flush = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "flush",
            p_flb_srvc_config->flush);

        p_flb_srvc_config->http_listen = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "http listen",
            p_flb_srvc_config->http_listen);

        p_flb_srvc_config->http_port = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "http port",
            p_flb_srvc_config->http_port);

        p_flb_srvc_config->http_server = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "http server",
            p_flb_srvc_config->http_server);

        p_flb_srvc_config->log_path = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "log file",
            temp_path);

        p_flb_srvc_config->log_level = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "log level",
            p_flb_srvc_config->log_level);

        p_flb_srvc_config->coro_stack_size = appconfig_get(
            &logsmanagement_d_conf,
            section,
            "coro stack size",
            p_flb_srvc_config->coro_stack_size);
    }
    else
        rc = LOGS_MANAG_CONFIG_LOAD_ERROR_P_FLB_SRVC_NULL;

    if(!rc){
        collector_info("CONFIG: [%s] flush: %s", section, p_flb_srvc_config->flush);
        collector_info("CONFIG: [%s] http listen: %s", section, p_flb_srvc_config->http_listen);
        collector_info("CONFIG: [%s] http port: %s", section, p_flb_srvc_config->http_port);
        collector_info("CONFIG: [%s] http server: %s", section, p_flb_srvc_config->http_server);
        collector_info("CONFIG: [%s] log file: %s", section, p_flb_srvc_config->log_path);
        collector_info("CONFIG: [%s] log level: %s", section, p_flb_srvc_config->log_level);
        collector_info("CONFIG: [%s] coro stack size: %s", section, p_flb_srvc_config->coro_stack_size);
    }

    return rc;
}
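
/* A minimal usage sketch of logs_manag_config_load() (illustrative only, not
 * necessarily how the plugin invokes it). The flb_srvc_config_t initializer
 * values below are placeholders rather than the plugin's real defaults, and
 * example_load_logs_manag_config() is a hypothetical helper. */
#if 0
static int example_load_logs_manag_config(int netdata_update_every){
    flb_srvc_config_t flb_srvc_config = {
        .flush           = "0.1",      /* placeholder values, overridden by   */
        .http_listen     = "0.0.0.0",  /* the [fluent bit] section of         */
        .http_port       = "2020",     /* logsmanagement.d.conf when present  */
        .http_server     = "false",
        .log_path        = "",
        .log_level       = "info",
        .coro_stack_size = "24576"
    };
    Flb_socket_config_t *forward_in_config = NULL;

    int rc = logs_manag_config_load(&flb_srvc_config, &forward_in_config, netdata_update_every);
    if(rc == LOGS_MANAG_CONFIG_LOAD_ERROR_NO_STOCK_CONFIG){
        /* neither the user nor the stock logsmanagement.d.conf could be loaded */
    }
    return rc;
}
#endif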

static bool metrics_dict_conflict_cb(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused){
    ((metrics_dict_item_t *)old_value)->num_new += ((metrics_dict_item_t *)new_value)->num_new;
    return true;
}

#define FLB_OUTPUT_PLUGIN_NAME_KEY "name"

static int flb_output_param_get_cb(void *entry, void *data){
    struct config_option *option = (struct config_option *) entry;
    Flb_output_config_t *flb_output = (Flb_output_config_t *) data;

    /* Allocate enough room for the widest possible "output N" prefix,
     * then write the prefix for this particular output's id. */
    char *param_prefix = callocz(1, snprintf(NULL, 0, "output %d", MAX_OUTPUTS_PER_SOURCE) + 1);
    sprintf(param_prefix, "output %d", flb_output->id);
    size_t param_prefix_len = strlen(param_prefix);

    if(!strncasecmp(option->name, param_prefix, param_prefix_len)){ // option->name looks like "output 1 host"
        char *param_key = &option->name[param_prefix_len]; // param_key should look like " host"
        while(*param_key == ' ') param_key++; // remove whitespace so it looks like "host"

        if(*param_key && strcasecmp(param_key, FLB_OUTPUT_PLUGIN_NAME_KEY)){ // ignore param_key "name"
            // debug_log( "config_option: name[%s], value[%s]", option->name, option->value);
            // debug_log( "config option kv:[%s][%s]", param_key, option->value);

            struct flb_output_config_param **p = &flb_output->param;
            while((*p) != NULL) p = &((*p)->next); // Go to last param of linked list

            (*p) = callocz(1, sizeof(struct flb_output_config_param));
            (*p)->key = strdupz(param_key);
            (*p)->val = strdupz(option->value);
        }
    }

    freez(param_prefix);

    return 0;
}
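
/* For illustration (the section keys and values below are hypothetical): with a
 * per-source configuration such as
 *
 *     output 1 name = forward
 *     output 1 host = 203.0.113.10
 *     output 1 port = 24224
 *
 * the "name" key selects the Fluent Bit output plugin, while the remaining
 * "output 1 ..." keys are appended by flb_output_param_get_cb() to that
 * output's key/value parameter list, e.g. ("host", "203.0.113.10") and
 * ("port", "24224"). */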

/**
 * @brief Initialize logs management based on a section configuration.
 * @note On error, calls p_file_info_destroy() to clean up before returning.
 * @param config_section Section to read configuration from.
 * @todo How to handle duplicate entries?
 */
static void config_section_init(uv_loop_t *main_loop,
                                struct section *config_section,
                                Flb_socket_config_t *forward_in_config,
                                flb_srvc_config_t *p_flb_srvc_config,
                                netdata_mutex_t *stdout_mut){

    struct File_info *p_file_info = callocz(1, sizeof(struct File_info));

    /* -------------------------------------------------------------------------
     * Check if config_section->name is valid and if so, use it as chartname.
     * ------------------------------------------------------------------------- */
    if(config_section->name && *config_section->name){
        p_file_info->chartname = strdupz(config_section->name);
        netdata_fix_chart_id((char *) p_file_info->chartname);
        collector_info("[%s]: Initializing config loading", p_file_info->chartname);
    } else {
        collector_error("Invalid logs management config section.");
        return p_file_info_destroy(p_file_info);
    }

    /* -------------------------------------------------------------------------
     * Check if this log source is enabled.
     * ------------------------------------------------------------------------- */
    if(appconfig_get_boolean(&log_management_config, config_section->name, "enabled", CONFIG_BOOLEAN_NO)){
        collector_info("[%s]: enabled = yes", p_file_info->chartname);
    } else {
        collector_info("[%s]: enabled = no", p_file_info->chartname);
        return p_file_info_destroy(p_file_info);
    }

    /* -------------------------------------------------------------------------
     * Check log type.
     * ------------------------------------------------------------------------- */
    char *type = appconfig_get(&log_management_config, config_section->name, "log type", "flb_tail");
    if(!type || !*type) p_file_info->log_type = FLB_TAIL; // Default
    else{
        if(!strcasecmp(type, "flb_tail")) p_file_info->log_type = FLB_TAIL;
        else if (!strcasecmp(type, "flb_web_log")) p_file_info->log_type = FLB_WEB_LOG;
        else if (!strcasecmp(type, "flb_kmsg")) p_file_info->log_type = FLB_KMSG;
        else if (!strcasecmp(type, "flb_systemd")) p_file_info->log_type = FLB_SYSTEMD;
        else if (!strcasecmp(type, "flb_docker_events")) p_file_info->log_type = FLB_DOCKER_EV;
        else if (!strcasecmp(type, "flb_syslog")) p_file_info->log_type = FLB_SYSLOG;
        else if (!strcasecmp(type, "flb_serial")) p_file_info->log_type = FLB_SERIAL;
        else if (!strcasecmp(type, "flb_mqtt")) p_file_info->log_type = FLB_MQTT;
        else p_file_info->log_type = FLB_TAIL;
    }
    freez(type);
    collector_info("[%s]: log type = %s", p_file_info->chartname, log_src_type_t_str[p_file_info->log_type]);
/* -------------------------------------------------------------------------
|
|
* Read log source.
|
|
* ------------------------------------------------------------------------- */
|
|
char *source = appconfig_get(&log_management_config, config_section->name, "log source", "local");
|
|
if(!source || !*source) p_file_info->log_source = LOG_SOURCE_LOCAL; // Default
|
|
else if(!strcasecmp(source, "forward")) p_file_info->log_source = LOG_SOURCE_FORWARD;
|
|
else p_file_info->log_source = LOG_SOURCE_LOCAL;
|
|
freez(source);
|
|
collector_info("[%s]: log source = %s", p_file_info->chartname, log_src_t_str[p_file_info->log_source]);
|
|
|
|
if(p_file_info->log_source == LOG_SOURCE_FORWARD && !forward_in_config){
|
|
collector_info("[%s]: forward_in_config == NULL - this log source will be disabled", p_file_info->chartname);
|
|
return p_file_info_destroy(p_file_info);
|
|
}
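
    /* Illustrative only: the keys read so far map to a logsmanagement.d section
     * such as the following (section name and values are hypothetical):
     *
     *    [Nginx_access.log]
     *        enabled = yes
     *        log type = flb_web_log
     *        log source = local
     */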

    /* -------------------------------------------------------------------------
     * Read stream uuid.
     * ------------------------------------------------------------------------- */
    p_file_info->stream_guid = appconfig_get(&log_management_config, config_section->name, "stream guid", "");
    collector_info("[%s]: stream guid = %s", p_file_info->chartname, p_file_info->stream_guid);


    /* -------------------------------------------------------------------------
     * Read log path configuration and check if it is valid.
     * ------------------------------------------------------------------------- */
    p_file_info->filename = appconfig_get(&log_management_config, config_section->name, "log path", LOG_PATH_AUTO);
    if( /* path doesn't matter when log source is not local */
        (p_file_info->log_source == LOG_SOURCE_LOCAL) &&

        /* FLB_SYSLOG is a special case, may or may not require a path */
        (p_file_info->log_type != FLB_SYSLOG) &&

        /* FLB_MQTT is a special case, does not require a path */
        (p_file_info->log_type != FLB_MQTT) &&

        (!p_file_info->filename /* Sanity check */ ||
         !*p_file_info->filename ||
         !strcmp(p_file_info->filename, LOG_PATH_AUTO) ||
         access(p_file_info->filename, R_OK)
        )){

        freez(p_file_info->filename);
        p_file_info->filename = NULL;

        switch(p_file_info->log_type){
            case FLB_TAIL:
                if(!strcasecmp(p_file_info->chartname, "Netdata_daemon.log")){
                    char path[FILENAME_MAX + 1];
                    snprintfz(path, FILENAME_MAX, "%s/daemon.log", get_log_dir());
                    if(access(path, R_OK)) {
                        collector_error("[%s]: 'Netdata_daemon.log' path (%s) invalid, unknown or needs permissions",
                                        p_file_info->chartname, path);
                        return p_file_info_destroy(p_file_info);
                    } else p_file_info->filename = strdupz(path);
                } else if(!strcasecmp(p_file_info->chartname, "Netdata_fluentbit.log")){
                    if(access(p_flb_srvc_config->log_path, R_OK)){
                        collector_error("[%s]: Netdata_fluentbit.log path (%s) invalid, unknown or needs permissions",
                                        p_file_info->chartname, p_flb_srvc_config->log_path);
                        return p_file_info_destroy(p_file_info);
                    } else p_file_info->filename = strdupz(p_flb_srvc_config->log_path);
                } else if(!strcasecmp(p_file_info->chartname, "Auth.log_tail")){
                    const char * const auth_path_default[] = {
                        "/var/log/auth.log",
                        NULL
                    };
                    int i = 0;
                    while(auth_path_default[i] && access(auth_path_default[i], R_OK)) i++;
                    if(!auth_path_default[i]){
                        collector_error("[%s]: auth.log path invalid, unknown or needs permissions", p_file_info->chartname);
                        return p_file_info_destroy(p_file_info);
                    } else p_file_info->filename = strdupz(auth_path_default[i]);
                } else if(!strcasecmp(p_file_info->chartname, "syslog_tail")){
                    const char * const syslog_path_default[] = {
                        "/var/log/syslog",   /* Debian, Ubuntu */
                        "/var/log/messages", /* RHEL, Red Hat, CentOS, Fedora */
                        NULL
                    };
                    int i = 0;
                    while(syslog_path_default[i] && access(syslog_path_default[i], R_OK)) i++;
                    if(!syslog_path_default[i]){
                        collector_error("[%s]: syslog path invalid, unknown or needs permissions", p_file_info->chartname);
                        return p_file_info_destroy(p_file_info);
                    } else p_file_info->filename = strdupz(syslog_path_default[i]);
                }
                break;
            case FLB_WEB_LOG:
                if(!strcasecmp(p_file_info->chartname, "Apache_access.log")){
                    const char * const apache_access_path_default[] = {
                        "/var/log/apache/access.log",
                        "/var/log/apache2/access.log",
                        "/var/log/apache2/access_log",
                        "/var/log/httpd/access_log",
                        "/var/log/httpd-access.log",
                        NULL
                    };
                    int i = 0;
                    while(apache_access_path_default[i] && access(apache_access_path_default[i], R_OK)) i++;
                    if(!apache_access_path_default[i]){
                        collector_error("[%s]: Apache access.log path invalid, unknown or needs permissions", p_file_info->chartname);
                        return p_file_info_destroy(p_file_info);
                    } else p_file_info->filename = strdupz(apache_access_path_default[i]);
                } else if(!strcasecmp(p_file_info->chartname, "Nginx_access.log")){
                    const char * const nginx_access_path_default[] = {
                        "/var/log/nginx/access.log",
                        NULL
                    };
                    int i = 0;
                    while(nginx_access_path_default[i] && access(nginx_access_path_default[i], R_OK)) i++;
                    if(!nginx_access_path_default[i]){
                        collector_error("[%s]: Nginx access.log path invalid, unknown or needs permissions", p_file_info->chartname);
                        return p_file_info_destroy(p_file_info);
                    } else p_file_info->filename = strdupz(nginx_access_path_default[i]);
                }
                break;
            case FLB_KMSG:
                if(access(KMSG_DEFAULT_PATH, R_OK)){
                    collector_error("[%s]: kmsg default path invalid, unknown or needs permissions", p_file_info->chartname);
                    return p_file_info_destroy(p_file_info);
                } else p_file_info->filename = strdupz(KMSG_DEFAULT_PATH);
                break;
            case FLB_SYSTEMD:
                p_file_info->filename = strdupz(SYSTEMD_DEFAULT_PATH);
                break;
            case FLB_DOCKER_EV:
                if(access(DOCKER_EV_DEFAULT_PATH, R_OK)){
                    collector_error("[%s]: Docker socket default Unix path invalid, unknown or needs permissions", p_file_info->chartname);
                    return p_file_info_destroy(p_file_info);
                } else p_file_info->filename = strdupz(DOCKER_EV_DEFAULT_PATH);
                break;
            default:
                collector_error("[%s]: log path invalid or unknown", p_file_info->chartname);
                return p_file_info_destroy(p_file_info);
        }
    }
    p_file_info->file_basename = get_basename(p_file_info->filename);
    collector_info("[%s]: p_file_info->filename: %s", p_file_info->chartname,
                   p_file_info->filename ? p_file_info->filename : "NULL");
    collector_info("[%s]: p_file_info->file_basename: %s", p_file_info->chartname,
                   p_file_info->file_basename ? p_file_info->file_basename : "NULL");
    if(unlikely(!p_file_info->filename)) return p_file_info_destroy(p_file_info);
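
    /* Illustrative only: for a local source the path may be set explicitly, e.g.
     *
     *    log path = /var/log/nginx/access.log
     *
     * When the key is missing, empty, left at the LOG_PATH_AUTO default, or not
     * readable, the well-known per-source defaults probed above are tried instead. */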

    /* -------------------------------------------------------------------------
     * Read "update every" and "update timeout" configuration.
     * ------------------------------------------------------------------------- */
    p_file_info->update_every = appconfig_get_number( &log_management_config, config_section->name,
                                                      "update every", g_logs_manag_config.update_every);
    collector_info("[%s]: update every = %d", p_file_info->chartname, p_file_info->update_every);

    p_file_info->update_timeout = appconfig_get_number( &log_management_config, config_section->name,
                                                        "update timeout", g_logs_manag_config.update_timeout);
    if(p_file_info->update_timeout < p_file_info->update_every) p_file_info->update_timeout = p_file_info->update_every;
    collector_info("[%s]: update timeout = %d", p_file_info->chartname, p_file_info->update_timeout);


    /* -------------------------------------------------------------------------
     * Read "use log timestamp" configuration.
     * ------------------------------------------------------------------------- */
    p_file_info->use_log_timestamp = appconfig_get_boolean_ondemand(&log_management_config, config_section->name,
                                                                    "use log timestamp",
                                                                    g_logs_manag_config.use_log_timestamp);
    collector_info("[%s]: use log timestamp = %s", p_file_info->chartname,
                   p_file_info->use_log_timestamp ? "auto or yes" : "no");


    /* -------------------------------------------------------------------------
     * Read compression acceleration configuration.
     * ------------------------------------------------------------------------- */
    p_file_info->compression_accel = appconfig_get_number( &log_management_config, config_section->name,
                                                           "compression acceleration",
                                                           g_logs_manag_config.compression_acceleration);
    collector_info("[%s]: compression acceleration = %d", p_file_info->chartname, p_file_info->compression_accel);


    /* -------------------------------------------------------------------------
     * Read DB mode.
     * ------------------------------------------------------------------------- */
    const char *const db_mode_str = appconfig_get(&log_management_config, config_section->name, "db mode", NULL);
    collector_info("[%s]: db mode = %s", p_file_info->chartname, db_mode_str ? db_mode_str : "NULL");
    p_file_info->db_mode = db_mode_str_to_db_mode(db_mode_str);
    freez((void *)db_mode_str);


    /* -------------------------------------------------------------------------
     * Read the interval for saving logs from circular buffers to the DB.
     * ------------------------------------------------------------------------- */
    p_file_info->buff_flush_to_db_interval = appconfig_get_number( &log_management_config, config_section->name,
                                                                   "circular buffer flush to db",
                                                                   g_logs_manag_config.buff_flush_to_db_interval);
    if(p_file_info->buff_flush_to_db_interval > SAVE_BLOB_TO_DB_MAX) {
        p_file_info->buff_flush_to_db_interval = SAVE_BLOB_TO_DB_MAX;
        collector_info("[%s]: circular buffer flush to db out of range. Using maximum permitted value: %d",
                       p_file_info->chartname, p_file_info->buff_flush_to_db_interval);

    } else if(p_file_info->buff_flush_to_db_interval < SAVE_BLOB_TO_DB_MIN) {
        p_file_info->buff_flush_to_db_interval = SAVE_BLOB_TO_DB_MIN;
        collector_info("[%s]: circular buffer flush to db out of range. Using minimum permitted value: %d",
                       p_file_info->chartname, p_file_info->buff_flush_to_db_interval);
    }
    collector_info("[%s]: circular buffer flush to db = %d", p_file_info->chartname, p_file_info->buff_flush_to_db_interval);


    /* -------------------------------------------------------------------------
     * Read BLOB max size configuration.
     * ------------------------------------------------------------------------- */
    p_file_info->blob_max_size = appconfig_get_number( &log_management_config, config_section->name,
                                                       "disk space limit MiB",
                                                       g_logs_manag_config.disk_space_limit_in_mib) MiB / BLOB_MAX_FILES;
    collector_info("[%s]: BLOB max size = %lld", p_file_info->chartname, (long long)p_file_info->blob_max_size);

    /* -------------------------------------------------------------------------
     * Read collected logs chart configuration.
     * ------------------------------------------------------------------------- */
    p_file_info->parser_config = callocz(1, sizeof(Log_parser_config_t));

    if(appconfig_get_boolean(&log_management_config, config_section->name,
                             "collected logs total chart enable",
                             g_logs_manag_config.enable_collected_logs_total)){
        p_file_info->parser_config->chart_config |= CHART_COLLECTED_LOGS_TOTAL;
    }
    collector_info( "[%s]: collected logs total chart enable = %s", p_file_info->chartname,
                    (p_file_info->parser_config->chart_config & CHART_COLLECTED_LOGS_TOTAL) ? "yes" : "no");

    if(appconfig_get_boolean(&log_management_config, config_section->name,
                             "collected logs rate chart enable",
                             g_logs_manag_config.enable_collected_logs_rate)){
        p_file_info->parser_config->chart_config |= CHART_COLLECTED_LOGS_RATE;
    }
    collector_info( "[%s]: collected logs rate chart enable = %s", p_file_info->chartname,
                    (p_file_info->parser_config->chart_config & CHART_COLLECTED_LOGS_RATE) ? "yes" : "no");


    /* -------------------------------------------------------------------------
     * Deal with log-type-specific configuration options.
     * ------------------------------------------------------------------------- */

    if(p_file_info->log_type == FLB_TAIL || p_file_info->log_type == FLB_WEB_LOG){
        Flb_tail_config_t *tail_config = callocz(1, sizeof(Flb_tail_config_t));
        if(appconfig_get_boolean(&log_management_config, config_section->name, "use inotify", CONFIG_BOOLEAN_YES))
            tail_config->use_inotify = 1;
        collector_info( "[%s]: use inotify = %s", p_file_info->chartname, tail_config->use_inotify ? "yes" : "no");

        p_file_info->flb_config = tail_config;
    }

    if(p_file_info->log_type == FLB_WEB_LOG){
        /* Check if a valid web log format configuration is detected */
        char *log_format = appconfig_get(&log_management_config, config_section->name, "log format", LOG_PATH_AUTO);
        const char delimiter = ' '; // TODO!!: TO READ FROM CONFIG
        collector_info("[%s]: log format = %s", p_file_info->chartname, log_format ? log_format : "NULL!");

        /* If "log format = auto" or no "log format" config is detected,
         * try log format autodetection based on the last log file line.
         * TODO 1: Add another case in OR where log_format is compared with a valid reg exp.
         * TODO 2: Set default log format and delimiter if not found in config? Or auto-detect? */
        if(!log_format || !*log_format || !strcmp(log_format, LOG_PATH_AUTO)){
            collector_info("[%s]: Attempting auto-detection of log format", p_file_info->chartname);
            char *line = read_last_line(p_file_info->filename, 0);
            if(!line){
                collector_error("[%s]: read_last_line() returned NULL", p_file_info->chartname);
                return p_file_info_destroy(p_file_info);
            }
            p_file_info->parser_config->gen_config = auto_detect_web_log_parser_config(line, delimiter);
            freez(line);
        }
        else{
            p_file_info->parser_config->gen_config = read_web_log_parser_config(log_format, delimiter);
            collector_info( "[%s]: Read web log parser config: %s", p_file_info->chartname,
                            p_file_info->parser_config->gen_config ? "success!" : "failed!");
        }
        freez(log_format);

        if(!p_file_info->parser_config->gen_config){
            collector_error("[%s]: No valid web log parser config found", p_file_info->chartname);
            return p_file_info_destroy(p_file_info);
        }

        /* Check whether metrics verification during parsing is required */
        Web_log_parser_config_t *wblp_config = (Web_log_parser_config_t *) p_file_info->parser_config->gen_config;
        wblp_config->verify_parsed_logs = appconfig_get_boolean( &log_management_config, config_section->name,
                                                                 "verify parsed logs", CONFIG_BOOLEAN_NO);
        collector_info("[%s]: verify parsed logs = %d", p_file_info->chartname, wblp_config->verify_parsed_logs);

        wblp_config->skip_timestamp_parsing = p_file_info->use_log_timestamp ? 0 : 1;
        collector_info("[%s]: skip_timestamp_parsing = %d", p_file_info->chartname, wblp_config->skip_timestamp_parsing);
        for(int j = 0; j < wblp_config->num_fields; j++){
            if((wblp_config->fields[j] == VHOST_WITH_PORT || wblp_config->fields[j] == VHOST)
                && appconfig_get_boolean(&log_management_config, config_section->name, "vhosts chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_VHOST;
            }
            if((wblp_config->fields[j] == VHOST_WITH_PORT || wblp_config->fields[j] == PORT)
                && appconfig_get_boolean(&log_management_config, config_section->name, "ports chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_PORT;
            }
            if((wblp_config->fields[j] == REQ_CLIENT)
                && appconfig_get_boolean(&log_management_config, config_section->name, "IP versions chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_IP_VERSION;
            }
            if((wblp_config->fields[j] == REQ_CLIENT)
                && appconfig_get_boolean(&log_management_config, config_section->name, "unique client IPs - current poll chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_REQ_CLIENT_CURRENT;
            }
            if((wblp_config->fields[j] == REQ_CLIENT)
                && appconfig_get_boolean(&log_management_config, config_section->name, "unique client IPs - all-time chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_REQ_CLIENT_ALL_TIME;
            }
            if((wblp_config->fields[j] == REQ || wblp_config->fields[j] == REQ_METHOD)
                && appconfig_get_boolean(&log_management_config, config_section->name, "http request methods chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_REQ_METHODS;
            }
            if((wblp_config->fields[j] == REQ || wblp_config->fields[j] == REQ_PROTO)
                && appconfig_get_boolean(&log_management_config, config_section->name, "http protocol versions chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_REQ_PROTO;
            }
            if((wblp_config->fields[j] == REQ_SIZE || wblp_config->fields[j] == RESP_SIZE)
                && appconfig_get_boolean(&log_management_config, config_section->name, "bandwidth chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_BANDWIDTH;
            }
            if((wblp_config->fields[j] == REQ_PROC_TIME)
                && appconfig_get_boolean(&log_management_config, config_section->name, "timings chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_REQ_PROC_TIME;
            }
            if((wblp_config->fields[j] == RESP_CODE)
                && appconfig_get_boolean(&log_management_config, config_section->name, "response code families chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_RESP_CODE_FAMILY;
            }
            if((wblp_config->fields[j] == RESP_CODE)
                && appconfig_get_boolean(&log_management_config, config_section->name, "response codes chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_RESP_CODE;
            }
            if((wblp_config->fields[j] == RESP_CODE)
                && appconfig_get_boolean(&log_management_config, config_section->name, "response code types chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_RESP_CODE_TYPE;
            }
            if((wblp_config->fields[j] == SSL_PROTO)
                && appconfig_get_boolean(&log_management_config, config_section->name, "SSL protocols chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_SSL_PROTO;
            }
            if((wblp_config->fields[j] == SSL_CIPHER_SUITE)
                && appconfig_get_boolean(&log_management_config, config_section->name, "SSL chipher suites chart", CONFIG_BOOLEAN_NO)){
                p_file_info->parser_config->chart_config |= CHART_SSL_CIPHER;
            }
        }
    }
    else if(p_file_info->log_type == FLB_KMSG){
        if(appconfig_get_boolean(&log_management_config, config_section->name, "severity chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_SYSLOG_SEVER;
        }
        if(appconfig_get_boolean(&log_management_config, config_section->name, "subsystem chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_KMSG_SUBSYSTEM;
        }
        if(appconfig_get_boolean(&log_management_config, config_section->name, "device chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_KMSG_DEVICE;
        }
    }
    else if(p_file_info->log_type == FLB_SYSTEMD || p_file_info->log_type == FLB_SYSLOG){
        if(p_file_info->log_type == FLB_SYSLOG){
            Syslog_parser_config_t *syslog_config = callocz(1, sizeof(Syslog_parser_config_t));

            /* Read syslog format */
            syslog_config->log_format = appconfig_get( &log_management_config,
                                                       config_section->name,
                                                       "log format", NULL);
            collector_info("[%s]: log format = %s", p_file_info->chartname,
                           syslog_config->log_format ? syslog_config->log_format : "NULL!");
            if(!syslog_config->log_format || !*syslog_config->log_format || !strcasecmp(syslog_config->log_format, "auto")){
                freez(syslog_config->log_format);
                freez(syslog_config);
                return p_file_info_destroy(p_file_info);
            }

            syslog_config->socket_config = callocz(1, sizeof(Flb_socket_config_t));

            /* Read syslog socket mode,
             * see also https://docs.fluentbit.io/manual/pipeline/inputs/syslog#configuration-parameters */
            syslog_config->socket_config->mode = appconfig_get( &log_management_config,
                                                                config_section->name,
                                                                "mode", "unix_udp");
            collector_info("[%s]: mode = %s", p_file_info->chartname, syslog_config->socket_config->mode);

            /* Check for a valid socket path if (mode == unix_udp) or
             * (mode == unix_tcp), otherwise read the syslog network interface
             * to bind to, if (mode == udp) or (mode == tcp). */
            if( !strcasecmp(syslog_config->socket_config->mode, "unix_udp") ||
                !strcasecmp(syslog_config->socket_config->mode, "unix_tcp")){
                if(!p_file_info->filename || !*p_file_info->filename || !strcasecmp(p_file_info->filename, LOG_PATH_AUTO)){
                    // freez(syslog_config->socket_config->mode);
                    freez(syslog_config->socket_config);
                    freez(syslog_config->log_format);
                    freez(syslog_config);
                    return p_file_info_destroy(p_file_info);
                }
                syslog_config->socket_config->unix_perm = appconfig_get(&log_management_config,
                                                                        config_section->name,
                                                                        "unix_perm", "0644");
                collector_info("[%s]: unix_perm = %s", p_file_info->chartname, syslog_config->socket_config->unix_perm);
            } else if( !strcasecmp(syslog_config->socket_config->mode, "udp") ||
                       !strcasecmp(syslog_config->socket_config->mode, "tcp")){
                // TODO: Check if listen is in valid format
                syslog_config->socket_config->listen = appconfig_get( &log_management_config,
                                                                      config_section->name,
                                                                      "listen", "0.0.0.0");
                collector_info("[%s]: listen = %s", p_file_info->chartname, syslog_config->socket_config->listen);
                syslog_config->socket_config->port = appconfig_get( &log_management_config,
                                                                    config_section->name,
                                                                    "port", "5140");
                collector_info("[%s]: port = %s", p_file_info->chartname, syslog_config->socket_config->port);
            } else {
                /* Any other modes are invalid */
                // freez(syslog_config->socket_config->mode);
                freez(syslog_config->socket_config);
                freez(syslog_config->log_format);
                freez(syslog_config);
                return p_file_info_destroy(p_file_info);
            }

            p_file_info->parser_config->gen_config = syslog_config;
        }
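
        /* Illustrative only: a syslog source therefore needs an explicit
         * "log format" plus socket settings, e.g. (values are hypothetical;
         * for unix_* modes the socket path is taken from "log path"):
         *
         *    mode = unix_udp
         *    log path = /tmp/netdata-syslog.sock
         *    unix_perm = 0644
         *
         * or, for network modes:
         *
         *    mode = udp
         *    listen = 0.0.0.0
         *    port = 5140
         */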
        if(appconfig_get_boolean(&log_management_config, config_section->name, "priority value chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_SYSLOG_PRIOR;
        }
        if(appconfig_get_boolean(&log_management_config, config_section->name, "severity chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_SYSLOG_SEVER;
        }
        if(appconfig_get_boolean(&log_management_config, config_section->name, "facility chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_SYSLOG_FACIL;
        }
    }
    else if(p_file_info->log_type == FLB_DOCKER_EV){
        if(appconfig_get_boolean(&log_management_config, config_section->name, "event type chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_DOCKER_EV_TYPE;
        }
        if(appconfig_get_boolean(&log_management_config, config_section->name, "event action chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_DOCKER_EV_ACTION;
        }
    }
    else if(p_file_info->log_type == FLB_SERIAL){
        Flb_serial_config_t *serial_config = callocz(1, sizeof(Flb_serial_config_t));

        serial_config->bitrate = appconfig_get(&log_management_config, config_section->name, "bitrate", "115200");
        serial_config->min_bytes = appconfig_get(&log_management_config, config_section->name, "min bytes", "1");
        serial_config->separator = appconfig_get(&log_management_config, config_section->name, "separator", "");
        serial_config->format = appconfig_get(&log_management_config, config_section->name, "format", "");

        p_file_info->flb_config = serial_config;
    }
    else if(p_file_info->log_type == FLB_MQTT){
        Flb_socket_config_t *socket_config = callocz(1, sizeof(Flb_socket_config_t));

        socket_config->listen = appconfig_get(&log_management_config, config_section->name, "listen", "0.0.0.0");
        socket_config->port = appconfig_get(&log_management_config, config_section->name, "port", "1883");

        p_file_info->flb_config = socket_config;

        if(appconfig_get_boolean(&log_management_config, config_section->name, "topic chart", CONFIG_BOOLEAN_NO)) {
            p_file_info->parser_config->chart_config |= CHART_MQTT_TOPIC;
        }
    }
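
    /* Illustrative only: with the defaults above, a minimal MQTT source needs
     * little more than (section name and values are hypothetical):
     *
     *    [mqtt_example]
     *        log type = flb_mqtt
     *        listen = 0.0.0.0
     *        port = 1883
     *        topic chart = yes
     */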

    /* -------------------------------------------------------------------------
     * Allocate p_file_info->parser_metrics memory.
     * ------------------------------------------------------------------------- */
    p_file_info->parser_metrics = callocz(1, sizeof(Log_parser_metrics_t));
    switch(p_file_info->log_type){
        case FLB_WEB_LOG: {
            p_file_info->parser_metrics->web_log = callocz(1, sizeof(Web_log_metrics_t));
            break;
        }
        case FLB_KMSG: {
            p_file_info->parser_metrics->kernel = callocz(1, sizeof(Kernel_metrics_t));
            p_file_info->parser_metrics->kernel->subsystem = dictionary_create( DICT_OPTION_SINGLE_THREADED |
                                                                                DICT_OPTION_NAME_LINK_DONT_CLONE |
                                                                                DICT_OPTION_DONT_OVERWRITE_VALUE);
            dictionary_register_conflict_callback(p_file_info->parser_metrics->kernel->subsystem, metrics_dict_conflict_cb, NULL);
            p_file_info->parser_metrics->kernel->device = dictionary_create( DICT_OPTION_SINGLE_THREADED |
                                                                             DICT_OPTION_NAME_LINK_DONT_CLONE |
                                                                             DICT_OPTION_DONT_OVERWRITE_VALUE);
            dictionary_register_conflict_callback(p_file_info->parser_metrics->kernel->device, metrics_dict_conflict_cb, NULL);
            break;
        }
        case FLB_SYSTEMD:
        case FLB_SYSLOG: {
            p_file_info->parser_metrics->systemd = callocz(1, sizeof(Systemd_metrics_t));
            break;
        }
        case FLB_DOCKER_EV: {
            p_file_info->parser_metrics->docker_ev = callocz(1, sizeof(Docker_ev_metrics_t));
            break;
        }
        case FLB_MQTT: {
            p_file_info->parser_metrics->mqtt = callocz(1, sizeof(Mqtt_metrics_t));
            p_file_info->parser_metrics->mqtt->topic = dictionary_create( DICT_OPTION_SINGLE_THREADED |
                                                                          DICT_OPTION_NAME_LINK_DONT_CLONE |
                                                                          DICT_OPTION_DONT_OVERWRITE_VALUE);
            dictionary_register_conflict_callback(p_file_info->parser_metrics->mqtt->topic, metrics_dict_conflict_cb, NULL);
            break;
        }
        default:
            break;
    }


    /* -------------------------------------------------------------------------
     * Configure (optional) custom charts.
     * ------------------------------------------------------------------------- */
    p_file_info->parser_cus_config = callocz(1, sizeof(Log_parser_cus_config_t *));
    p_file_info->parser_metrics->parser_cus = callocz(1, sizeof(Log_parser_cus_metrics_t *));
    for(int cus_off = 1; cus_off <= MAX_CUS_CHARTS_PER_SOURCE; cus_off++){

        /* Read chart name config */
        char *cus_chart_k = mallocz(snprintf(NULL, 0, "custom %d chart", MAX_CUS_CHARTS_PER_SOURCE) + 1);
        sprintf(cus_chart_k, "custom %d chart", cus_off);
        char *cus_chart_v = appconfig_get(&log_management_config, config_section->name, cus_chart_k, NULL);
        debug_log( "cus chart: (%s:%s)", cus_chart_k, cus_chart_v ? cus_chart_v : "NULL");
        freez(cus_chart_k);
        if(unlikely(!cus_chart_v)){
            collector_error("[%s]: custom %d chart = NULL, custom charts for this log source will be disabled.",
                            p_file_info->chartname, cus_off);
            break;
        }
        netdata_fix_chart_id(cus_chart_v);

        /* Read regex config */
        char *cus_regex_k = mallocz(snprintf(NULL, 0, "custom %d regex", MAX_CUS_CHARTS_PER_SOURCE) + 1);
        sprintf(cus_regex_k, "custom %d regex", cus_off);
        char *cus_regex_v = appconfig_get(&log_management_config, config_section->name, cus_regex_k, NULL);
        debug_log( "cus regex: (%s:%s)", cus_regex_k, cus_regex_v ? cus_regex_v : "NULL");
        freez(cus_regex_k);
        if(unlikely(!cus_regex_v)) {
            collector_error("[%s]: custom %d regex = NULL, custom charts for this log source will be disabled.",
                            p_file_info->chartname, cus_off);
            freez(cus_chart_v);
            break;
        }

        /* Read regex name config */
        char *cus_regex_name_k = mallocz(snprintf(NULL, 0, "custom %d regex name", MAX_CUS_CHARTS_PER_SOURCE) + 1);
        sprintf(cus_regex_name_k, "custom %d regex name", cus_off);
        char *cus_regex_name_v = appconfig_get( &log_management_config, config_section->name,
                                                cus_regex_name_k, cus_regex_v);
        debug_log( "cus regex name: (%s:%s)", cus_regex_name_k, cus_regex_name_v ? cus_regex_name_v : "NULL");
        freez(cus_regex_name_k);
        m_assert(cus_regex_name_v, "cus_regex_name_v cannot be NULL, should be cus_regex_v");


        /* Escape any backslashes in the regex name, to ensure dimension is displayed correctly in charts */
        int regex_name_bslashes = 0;
        char **p_regex_name = &cus_regex_name_v;
        for(char *p = *p_regex_name; *p; p++) if(unlikely(*p == '\\')) regex_name_bslashes++;
        if(regex_name_bslashes) {
            *p_regex_name = reallocz(*p_regex_name, strlen(*p_regex_name) + 1 + regex_name_bslashes);
            for(char *p = *p_regex_name; *p; p++){
                if(unlikely(*p == '\\')){
                    memmove(p + 1, p, strlen(p) + 1);
                    *p++ = '\\';
                }
            }
        }

        /* Read ignore case config */
        char *cus_ignore_case_k = mallocz(snprintf(NULL, 0, "custom %d ignore case", MAX_CUS_CHARTS_PER_SOURCE) + 1);
        sprintf(cus_ignore_case_k, "custom %d ignore case", cus_off);
        int cus_ignore_case_v = appconfig_get_boolean( &log_management_config,
                                                       config_section->name, cus_ignore_case_k, CONFIG_BOOLEAN_YES);
        debug_log( "cus case: (%s:%s)", cus_ignore_case_k, cus_ignore_case_v ? "yes" : "no");
        freez(cus_ignore_case_k);

        int regex_flags = cus_ignore_case_v ? REG_EXTENDED | REG_NEWLINE | REG_ICASE : REG_EXTENDED | REG_NEWLINE;

        int rc;
        regex_t regex;
        if (unlikely((rc = regcomp(&regex, cus_regex_v, regex_flags)))){
            size_t regcomp_err_str_size = regerror(rc, &regex, 0, 0);
            char *regcomp_err_str = mallocz(regcomp_err_str_size);
            regerror(rc, &regex, regcomp_err_str, regcomp_err_str_size);
            collector_error("[%s]: could not compile regex for custom %d chart: %s due to error: %s. "
                            "Custom charts for this log source will be disabled.",
                            p_file_info->chartname, cus_off, cus_chart_v, regcomp_err_str);
            freez(regcomp_err_str);
            freez(cus_chart_v);
            freez(cus_regex_v);
            freez(cus_regex_name_v);
            break;
        }

        /* Allocate memory and copy config to p_file_info->parser_cus_config struct */
        p_file_info->parser_cus_config = reallocz( p_file_info->parser_cus_config,
                                                   (cus_off + 1) * sizeof(Log_parser_cus_config_t *));
        p_file_info->parser_cus_config[cus_off - 1] = callocz(1, sizeof(Log_parser_cus_config_t));

        p_file_info->parser_cus_config[cus_off - 1]->chartname = cus_chart_v;
        p_file_info->parser_cus_config[cus_off - 1]->regex_str = cus_regex_v;
        p_file_info->parser_cus_config[cus_off - 1]->regex_name = cus_regex_name_v;
        p_file_info->parser_cus_config[cus_off - 1]->regex = regex;

        /* Initialise custom log parser metrics struct array */
        p_file_info->parser_metrics->parser_cus = reallocz( p_file_info->parser_metrics->parser_cus,
                                                            (cus_off + 1) * sizeof(Log_parser_cus_metrics_t *));
        p_file_info->parser_metrics->parser_cus[cus_off - 1] = callocz(1, sizeof(Log_parser_cus_metrics_t));


        p_file_info->parser_cus_config[cus_off] = NULL;
        p_file_info->parser_metrics->parser_cus[cus_off] = NULL;
    }
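
    /* Illustrative only: custom charts are declared with numbered key groups,
     * e.g. (chart name and regex are hypothetical):
     *
     *    custom 1 chart = sshd
     *    custom 1 regex name = failed password
     *    custom 1 regex = Failed password
     *    custom 1 ignore case = yes
     *
     * Up to MAX_CUS_CHARTS_PER_SOURCE such groups are read; the first missing
     * "custom N chart" or "custom N regex" key stops the scan for this source. */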


    /* -------------------------------------------------------------------------
     * Configure (optional) Fluent Bit outputs.
     * ------------------------------------------------------------------------- */

    Flb_output_config_t **output_next_p = &p_file_info->flb_outputs;
    for(int out_off = 1; out_off <= MAX_OUTPUTS_PER_SOURCE; out_off++){

        /* Read output plugin */
        char *out_plugin_k = callocz(1, snprintf(NULL, 0, "output %d " FLB_OUTPUT_PLUGIN_NAME_KEY, MAX_OUTPUTS_PER_SOURCE) + 1);
        sprintf(out_plugin_k, "output %d " FLB_OUTPUT_PLUGIN_NAME_KEY, out_off);
        char *out_plugin_v = appconfig_get(&log_management_config, config_section->name, out_plugin_k, NULL);
        debug_log( "output %d "FLB_OUTPUT_PLUGIN_NAME_KEY": %s", out_off, out_plugin_v ? out_plugin_v : "NULL");
        freez(out_plugin_k);
        if(unlikely(!out_plugin_v)){
            collector_error("[%s]: output %d "FLB_OUTPUT_PLUGIN_NAME_KEY" = NULL, outputs for this log source will be disabled.",
                            p_file_info->chartname, out_off);
            break;
        }

        Flb_output_config_t *output = callocz(1, sizeof(Flb_output_config_t));
        output->id = out_off;
        output->plugin = out_plugin_v;

        /* Read parameters for this output */
        avl_traverse_lock(&config_section->values_index, flb_output_param_get_cb, output);

        *output_next_p = output;
        output_next_p = &output->next;
    }


    /* -------------------------------------------------------------------------
     * Read circular buffer configuration and initialize the buffer.
     * ------------------------------------------------------------------------- */
    size_t circular_buffer_max_size = ((size_t)appconfig_get_number(&log_management_config,
                                                                    config_section->name,
                                                                    "circular buffer max size MiB",
                                                                    g_logs_manag_config.circ_buff_max_size_in_mib)) MiB;
    if(circular_buffer_max_size > CIRCULAR_BUFF_MAX_SIZE_RANGE_MAX) {
        circular_buffer_max_size = CIRCULAR_BUFF_MAX_SIZE_RANGE_MAX;
        collector_info( "[%s]: circular buffer max size out of range. Using maximum permitted value (MiB): %zu",
                        p_file_info->chartname, (size_t) (circular_buffer_max_size / (1 MiB)));
    } else if(circular_buffer_max_size < CIRCULAR_BUFF_MAX_SIZE_RANGE_MIN) {
        circular_buffer_max_size = CIRCULAR_BUFF_MAX_SIZE_RANGE_MIN;
        collector_info( "[%s]: circular buffer max size out of range. Using minimum permitted value (MiB): %zu",
                        p_file_info->chartname, (size_t) (circular_buffer_max_size / (1 MiB)));
    }
    collector_info("[%s]: circular buffer max size MiB = %zu", p_file_info->chartname, (size_t) (circular_buffer_max_size / (1 MiB)));

    int circular_buffer_allow_dropped_logs = appconfig_get_boolean( &log_management_config,
                                                                    config_section->name,
                                                                    "circular buffer drop logs if full",
                                                                    g_logs_manag_config.circ_buff_drop_logs);
    collector_info("[%s]: circular buffer drop logs if full = %s", p_file_info->chartname,
                   circular_buffer_allow_dropped_logs ? "yes" : "no");

    p_file_info->circ_buff = circ_buff_init(p_file_info->buff_flush_to_db_interval,
                                            circular_buffer_max_size,
                                            circular_buffer_allow_dropped_logs);
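
    /* Illustrative only: per-source overrides of the buffer settings read above
     * (values are hypothetical; the defaults come from g_logs_manag_config):
     *
     *    circular buffer max size MiB = 64
     *    circular buffer drop logs if full = no
     */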


    /* -------------------------------------------------------------------------
     * Initialize rrd related structures.
     * ------------------------------------------------------------------------- */
    p_file_info->chart_meta = callocz(1, sizeof(struct Chart_meta));
    memcpy(p_file_info->chart_meta, &chart_types[p_file_info->log_type], sizeof(struct Chart_meta));
    p_file_info->chart_meta->base_prio = NETDATA_CHART_PRIO_LOGS_BASE + p_file_infos_arr->count * NETDATA_CHART_PRIO_LOGS_INCR;
    netdata_mutex_lock(stdout_mut);
    p_file_info->chart_meta->init(p_file_info);
    fflush(stdout);
    netdata_mutex_unlock(stdout_mut);

    /* -------------------------------------------------------------------------
     * Initialize input plugin for local log sources.
     * ------------------------------------------------------------------------- */
    if(p_file_info->log_source == LOG_SOURCE_LOCAL){
        int rc = flb_add_input(p_file_info);
        if(unlikely(rc)){
            collector_error("[%s]: flb_add_input() error: %d", p_file_info->chartname, rc);
            return p_file_info_destroy(p_file_info);
        }
    }

    /* flb_complete_item_timer_timeout_cb() is needed for both local and
     * non-local sources. */
    p_file_info->flb_tmp_buff_cpy_timer.data = p_file_info;
    if(unlikely(0 != uv_mutex_init(&p_file_info->flb_tmp_buff_mut)))
        fatal("uv_mutex_init(&p_file_info->flb_tmp_buff_mut) failed");

    fatal_assert(0 == uv_timer_init( main_loop,
                                     &p_file_info->flb_tmp_buff_cpy_timer));

    fatal_assert(0 == uv_timer_start( &p_file_info->flb_tmp_buff_cpy_timer,
                                      flb_complete_item_timer_timeout_cb, 0,
                                      p_file_info->update_timeout * MSEC_PER_SEC));


    /* -------------------------------------------------------------------------
     * All set up successfully - add p_file_info to the list of all p_file_info structs.
     * ------------------------------------------------------------------------- */
    p_file_infos_arr->data = reallocz(p_file_infos_arr->data, (++p_file_infos_arr->count) * (sizeof p_file_info));
    p_file_infos_arr->data[p_file_infos_arr->count - 1] = p_file_info;

    __atomic_store_n(&p_file_info->state, LOG_SRC_READY, __ATOMIC_RELAXED);

    collector_info("[%s]: initialization completed", p_file_info->chartname);
}
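
/**
 * @brief Load logs management configuration and initialize all log sources.
 * @details Iterates over the '.conf' files found in the user 'logsmanagement.d'
 *          directory, calling config_section_init() for every section of every
 *          file that loads successfully. If no user 'default.conf' is found,
 *          the stock 'logsmanagement.d/default.conf' is loaded instead; if that
 *          also fails, logs management is disabled and the process exits.
 */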
void config_file_load( uv_loop_t *main_loop,
                       Flb_socket_config_t *p_forward_in_config,
                       flb_srvc_config_t *p_flb_srvc_config,
                       netdata_mutex_t *stdout_mut){

    int user_default_conf_found = 0;

    struct section *config_section;

    char tmp_name[FILENAME_MAX + 1];
    snprintfz(tmp_name, FILENAME_MAX, "%s/logsmanagement.d", get_user_config_dir());
    DIR *dir = opendir(tmp_name);

    if(dir){
        struct dirent *de = NULL;
        while ((de = readdir(dir))) {
            size_t d_name_len = strlen(de->d_name);
            if (de->d_type == DT_DIR || d_name_len < 6 || strncmp(&de->d_name[d_name_len - 5], ".conf", sizeof(".conf")))
                continue;

            if(!user_default_conf_found && !strncmp(de->d_name, "default.conf", sizeof("default.conf")))
                user_default_conf_found = 1;

            snprintfz(tmp_name, FILENAME_MAX, "%s/logsmanagement.d/%s", get_user_config_dir(), de->d_name);
            collector_info("loading config:%s", tmp_name);
            log_management_config = (struct config){
                .first_section = NULL,
                .last_section = NULL,
                .mutex = NETDATA_MUTEX_INITIALIZER,
                .index = {
                    .avl_tree = {
                        .root = NULL,
                        .compar = appconfig_section_compare
                    },
                    .rwlock = AVL_LOCK_INITIALIZER
                }
            };
            if(!appconfig_load(&log_management_config, tmp_name, 0, NULL))
                continue;

            config_section = log_management_config.first_section;
            do {
                config_section_init(main_loop, config_section, p_forward_in_config, p_flb_srvc_config, stdout_mut);
                config_section = config_section->next;
            } while(config_section);

        }
        closedir(dir);
    }

    if(!user_default_conf_found){
        collector_info("CONFIG: cannot load user config '%s/logsmanagement.d/default.conf'. Will try stock config.", get_user_config_dir());
        snprintfz(tmp_name, FILENAME_MAX, "%s/logsmanagement.d/default.conf", get_stock_config_dir());
        if(!appconfig_load(&log_management_config, tmp_name, 0, NULL)){
            collector_error("CONFIG: cannot load stock config '%s/logsmanagement.d/default.conf'. Logs management will be disabled.", get_stock_config_dir());
            exit(1);
        }

        config_section = log_management_config.first_section;
        do {
            config_section_init(main_loop, config_section, p_forward_in_config, p_flb_srvc_config, stdout_mut);
            config_section = config_section->next;
        } while(config_section);
    }
}