Mirror of https://github.com/netdata/netdata.git (synced 2025-04-16 18:37:50 +00:00)

This PR adds the logs-management external plugin. See the included README for an extensive list of features. ------------------------------------------------------------------------------------- * Add proper status return in JSON response of functions * Add column info to functions * Escape special characters when returning JSON response * Add proper functions help and defaults. Fix help not working * Add 'logs_management_meta' object in functions results * Fix compiler warnings * Replace tabs with 3 spaces in web_client_api_request_v1_logsmanagement_sources() * Add 'sources' in functions to display list of log sources * Update functions column values for logs * Update chart titles and remove '/s' from units * Add support for compound queries in circular buffers * Refactor circ_buff_search() to get rid of circ_buff_search_compound() * Fix incorrect docker events nano timestamp padding * Fixed botched rebasing * Replace get_unix_time_ms() with now_realtime_msec() * Remove binary generation from Fluent-Bit lib build * Fix compiler warnings due to new timestamp type * Remove STDIN and STDOUT support from Fluent-Bit library * Initial support for FLB_KMSG kernel logs collection * Add kernel logs charts * Add kernel logs subsystem and device charts * Skip collection of pre-existing logs in kmsg ring buffer * Add example of custom kmsg charts * Add extra initialization error logs * Fix bug of Docker Events collector failure disabling whole logs management engine * Remove reduntant FLB output code * Remove some obsolete TODO comments * Remove some commented out error/debug prints * Disable some Fluent-Bit config options not required * Make circular buffer spare items option configurable * Add DB mode configuration option * Replace p_file_infos_arr->data[i] with p_file_info in db_api.c * Remove db_loop due to all function calls being synchronous * Add initial README.md * Add DB mode = none changes * Add a simple webpage to visualize log query results * Add support for source selection to logs_query.html * Add option to query multiple log sources * Mark non-queryable sources as such in logs_query.html * Add option to use either GET or functions request in logs_query.html * Install logs_query.html when running stress tests * Update README.md requirements * Change installer behavior to build logs management by default * Disable logs management at runtime by default * Add global db mode configuration in 'logs management' config section * Split logsmanagement.conf into required & optional sections * Remove --enable-logsmanagement from stress test script * Add global config option for 'circular buffer max size MiB' * Add global config option for 'circular buffer drop logs if full' * Update 'General Configuration' in README.md * Add global config option for remaining optional settings * Add systemd collector requirements to TOC * README: Convert general configuration to table * README: Fix previous botched commit * Enable logs management by default when building for stress testing * Move logging to collector.log from error.log * Fix contenttype compilation errors * Move logging to collector.log in plugin_logsmanagement.c * Rename 'rows' to 'records' in charts * Add Netdata error.log parsing * Add more dashboard descriptions * Sanitize chart ids * Attempt to fix failing CI * Update README.md * Update README.md * Another attempt to fix CI failures * Fix undefined reference to 'uv_sleep' on certain platforms * Support FLB forward input and FLB output plugins. 
Squashed commit of the following: commit 55e2bf4fb34a2e02ffd0b280790197310a5299f3 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Apr 13 16:41:09 2023 +0300 Remove error.log from stock config commit bbdc62c2c9727359bc3c8ef8c33ee734d0039be7 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Apr 13 16:37:48 2023 +0300 Add cleanup of Fluent Bit outputs in p_file_info_destroy() commit 09b0aa4268ec1ccef160c99c5d5f31b6388edd28 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Apr 13 14:34:17 2023 +0300 Some code and config cleanup commit 030d074667d5ee2cad10f85cd836ca90e29346ad Author: Dim-P <dimitris1703@gmail.com> Date: Thu Apr 13 13:04:08 2023 +0300 Enable additional Fluent Bit output plugins for shared library commit 490aa5d44caa38042521d24c6b886b8b4a59a73c Author: Dim-P <dimitris1703@gmail.com> Date: Thu Apr 13 01:33:19 2023 +0300 Add initialization of Fluent Bit user-configured outputs commit c96e9fe9cea96549aa5eae09d0deeb130da02793 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Apr 4 23:13:16 2023 +0100 Complete read of parameters for FLB outputs config commit 00988897f9b86d1ecc5c141b19df7ad7d74f7e96 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Apr 3 19:43:31 2023 +0100 Update README.md commit 6deea5399c2707942aeaa51408f999ca45dfd351 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Apr 3 16:02:28 2023 +0100 Refactor Syslog_parser_config_t and add Flb_socket_config_t commit 7bf998a4c298bbd489ef735c56a6e85a137772c9 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Apr 3 14:19:57 2023 +0100 Update README.md commit c353d194b12c54f134936072ebaded0424d73cc0 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Mar 31 14:52:57 2023 +0100 Update README.md commit 6be726eaff3738ba7884de799aa52949833af65a Author: Dim-P <dimitris1703@gmail.com> Date: Fri Mar 31 13:06:29 2023 +0100 Update README. 
Fix docker_events streaming commit 6aabfb0f1ef0529a7a0ecbaf940bc0952bf42518 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Mar 30 21:27:45 2023 +0100 Fix stuck in infinite loop bug for FLB_GENERIC, FLB_WEB_LOG and FLB_SERIAL remote log sources commit eea6346b708cc7a5ce6e2249366870f4924eabae Author: Dim-P <dimitris1703@gmail.com> Date: Thu Mar 30 21:04:12 2023 +0100 Remove callback that searches for streamed p_file_info match commit bc9c5a523b0b0ab5588adbff391a43ba8d9a0cdf Author: Dim-P <dimitris1703@gmail.com> Date: Thu Mar 30 15:51:39 2023 +0100 Basic streaming works commit 4c80f59f0214bc07895f0b2edca47cb02bc06420 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Mar 28 22:05:22 2023 +0100 WIP commit eeb37a71b602fb0738fe8077ccddc0a8ce632304 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Mar 27 22:52:09 2023 +0100 Add generic forward streaming input commit 1459b91847c80c4d97de96b75b00771039458ad6 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Mar 23 18:50:14 2023 +0000 FLB_FORWARD: WIP * Add number of logs per item in DB and in queries response * Fix wrong number of lines stored in DB for web logs * Refactor number of logs parsers and charts code * Add option to toggle number of collected logs metrics and charts * Disable kmsg log collector by default * Fix logs_query.html to work with any server ip * Fix regressed wrong number of web log lines bug * Change query quota type from size_t to long long * Update alpine version when searching for fts-dev requirements * Update query results to return both requested and actual quota * Fix bug of circ buffs not being read if head == read but not empty * Squashed commit of the following: commit 34edb316a737f3edcffcf8fa88a3801599011495 Author: Dim-P <dimitris1703@gmail.com> Date: Thu May 4 20:02:36 2023 +0100 Comment out some debug prints commit 51b9b87a88516186530f5b4b65f785b543fefe8c Author: Dim-P <dimitris1703@gmail.com> Date: Fri Apr 28 19:21:54 2023 +0100 Fix wrong filenames in BLOBS_TABLE after rotation commit 6055fc2893b48661af324f20ee61511a40abbc02 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Apr 28 12:22:04 2023 +0100 Add chart showing number of circular buffer items commit 0bb5210b0847f4b7596f633ec96fc10aa8ebc791 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Apr 25 16:47:29 2023 +0300 Various fixes. Fix num_lines calculation. Add debug prints for circ buffers. Remove circ buff spare items option. Fix calculation of circ buff memory consumption. Add buff_realloc_rwlock for db_mode = none case. Fix circ buff read to be done correctly when buff is full. commit f494af8c95be84404c7d854494d26da3bcbd3ad7 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Apr 21 16:03:50 2023 +0300 Fix freez() on non-malloced address commit cce6d09e9cf9b847aface7309643e2c0a6041390 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Apr 21 15:41:25 2023 +0300 Add option to dynamically expand circ buffs when full * Use log timestamps when possible, instead of collection timestamps. Also, add config options for Fluent Bit engine and remove tail_plugin. Squashed commit of the following: commit b16a02eb6e3a90565c90e0a274b87b123e7b18e5 Author: Dim-P <dimitris1703@gmail.com> Date: Tue May 16 19:38:57 2023 +0100 Add Fluent Bit service config options to netdata.conf. 
Add monitoring of new log file fluentbit.log commit ab77c286294548ea62a3879ac0f8b8bbfe6a0687 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 21:25:17 2023 +0100 Remove some debug prints commit 46d64ad2434e69b1d20720297aec1ddb869e1f84 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 21:19:32 2023 +0100 Fix null values in charts commit 8ec96821d6a882f28cbd19244ebdfc86c807d2f4 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 17:43:04 2023 +0100 Update README.md to reflect log timestamp changes commit 079a91858cf9db2f74711581235bc17eb97c7dad Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 16:23:14 2023 +0100 Add configurable option for 'update timeout' commit 72b5e2505d4657fcbb5ccb6eeee00c45eb0b51ff Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 16:05:08 2023 +0100 Revert logsmanagement.conf to logs-manag-master one commit 70d0ea6f8d272fff318aa3095d90a78dcc3411a7 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 16:02:00 2023 +0100 Fix bug of circ buff items not marked as done commit 5716420838771edb7842be4669bf96235b15cf71 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 15 16:01:41 2023 +0100 Fix do_custom_charts_update() to work for all log sources commit a8def8f53fd25c3efa56ef27e267df3261913a8e Author: Dim-P <dimitris1703@gmail.com> Date: Fri May 12 18:20:20 2023 +0100 Remove GENERIC and WEB_LOG cases. Remove tail_plugin.c/h. Remove generic_parser(). commit 1cf05966e33491dbeb9b877f18d1ea8643aabeba Author: Dim-P <dimitris1703@gmail.com> Date: Fri May 12 16:54:59 2023 +0100 Fix FLB_GENERIC and FLB_SERIAL to work with new timestamp logic commit df3266810531f1af5f99b666fbf44c503b304a39 Author: Dim-P <dimitris1703@gmail.com> Date: Fri May 12 14:55:04 2023 +0100 Get rid of *_collect() functions and restructure plugin_logsmanagement workers commit 3eee069842f3257fffe60dacfc274363bc43491c Author: Dim-P <dimitris1703@gmail.com> Date: Fri May 12 14:28:33 2023 +0100 Fix wrong order of #define _XOPEN_SOURCE 700 in parser.c commit 941aa80cb55d5a7d6fe8926da930d9803be52312 Author: Dim-P <dimitris1703@gmail.com> Date: Thu May 11 22:27:39 2023 +0100 Update plugin_logsmanagement_web_log to use new timestamp logic and to support delayed logs. Refactor req_method metrics code. commit 427a7d0e2366d43cb5eab7daa1ed82dfc3bc8bc8 Author: Dim-P <dimitris1703@gmail.com> Date: Tue May 9 20:26:08 2023 +0100 Update plugin_logsmanagement_kernel to use new timestamp logic and to support delayed charts commit a7e95a6d3e5c8b62531b671fd3ec7b8a3196b5bb Author: Dim-P <dimitris1703@gmail.com> Date: Tue May 9 15:22:14 2023 +0100 Update plugin_logsmanagement_systemd to use new timestamp logic and support delayed charts commit 48237ac2ce49c82abdf2783952fd9f0ef05d72e1 Author: Dim-P <dimitris1703@gmail.com> Date: Tue May 9 13:29:44 2023 +0100 Refactor number of collected logs chart update code commit a933c8fcae61c23fa0ec6d0074526ac5d243cf16 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 8 22:11:19 2023 +0100 Update plugin_logsmanagement_docker_ev to use new timestamp logic and support delayed charts commit 5d8db057155affd5cb721399a639d75a81801b7f Author: Dim-P <dimitris1703@gmail.com> Date: Fri May 5 15:18:06 2023 +0100 Change some Fluent Bit collectors to use log timestamps instead of collection timestamps * Remove some unused defines and typedefs * Improve flb_init() * Update file-level doxygen. Add SPDX license declaration. * Better handling of termination of Fluent Bit * Better handling of DB errors. Various fixes. 
Squashed commit of the following: commit f55feea1274c3857eda1e9d899743db6e3eb5bf5 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Jun 6 13:28:00 2023 +0100 Fix web log parsing in case of lines terminated by \r commit 9e05758a4ecfac57a0db14757cff9536deda51d8 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Jun 5 20:42:05 2023 +0100 Fix warnings due to -Wformat-truncation=2 commit 63477666fa42446d74693aae542580d4e1e81f03 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Jun 5 16:48:45 2023 +0100 Autodiscovery of Netdata error.log based on netdata_configured_log_dir commit cab5e6d6061f4259172bbf72666e8b4a3a35dd66 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Jun 5 16:24:39 2023 +0100 Replace Forward config default string literals with macros commit 4213398031dbb53afbc943d76bf7df202d12bf6f Author: Dim-P <dimitris1703@gmail.com> Date: Mon Jun 5 15:56:29 2023 +0100 Proper cleanup of flb_lib_out_cb *callback in case of error commit f76fd7cc7bc2d0241e4d3517f61ae192d4246300 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Jun 5 15:36:07 2023 +0100 Proper termination of Forward input and respective log sources in case of error commit 3739fd96c29e13298eb3a6e943a63172cdf39d5f Author: Dim-P <dimitris1703@gmail.com> Date: Thu Jun 1 21:19:56 2023 +0100 Merge db_search() and db_search_compound() commit fcface90cb0a6df3c3a2de5e1908b1b3467dd579 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Jun 1 19:17:26 2023 +0100 Proper error handling in db_search() and db_search_compound(). Refactor the code too. commit c10667ebee2510a1af77114b3a7e18a0054b5dae Author: Dim-P <dimitris1703@gmail.com> Date: Thu Jun 1 14:23:34 2023 +0100 Update DB mode and dir when switching to db_mode_none commit d37d4c3d79333bb9fa430650c13ad625458620e8 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Jun 1 12:56:13 2023 +0100 Fix flb_stop() SIGSEGV commit 892e231c68775ff1a1f052d292d26384f1ef54b1 Author: Dim-P <dimitris1703@gmail.com> Date: Tue May 30 21:14:58 2023 +0100 Switch to db_writer_db_mode_none if db_writer_db_mode_full encounters error commit f7a0c2135ff61d3a5b0460ec5964eb6bce164bd6 Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 29 21:41:21 2023 +0100 Complete error handling changes to db_init(). Add some const type qualifiers. Refactor some code for readability commit 13dbeac936d22958394cb1aaec394384f5a93fdd Author: Dim-P <dimitris1703@gmail.com> Date: Mon May 29 17:14:17 2023 +0100 More error handling changes in db_init(). Change some global default settings if stress testing. commit eb0691c269cd09054190bf0ee9c4e9247b4a2548 Author: Dim-P <dimitris1703@gmail.com> Date: Fri May 26 23:29:12 2023 +0100 Better handling of db writer threads errors. 
Add db timings charts * Fix mystrsep() replaced by strsep_skip_consecutive_separators() * Fix older GCC failure due to label before declaration * Fix failed builds when using libuv <= v1.19 * Fix some Codacy warnings * Fix warning: implicit declaration of function ‘strsep’ * Use USEC_PER_SEC instead of 1000000ULL * Use UUID_STR_LEN instead of GUID_LEN + 1 * Combine multiple 'ln -sf' Docker instructions to one * Update README with systemd development libraries requirement * Comment out mallocz() success checkes in parser_csv() * Fix shellcheck warnings * Remove asserts for empty SYSLOG_IDENTIFIER or PID * Fix FreeBSD failing builds * Fix some more shellcheck warnings * Update Alpine fts-dev required packages * First changes to use web log timestamp for correct metrics timings * Initial work to add test_parse_web_log_line() unit test * Complete test_parse_web_log_line() tests * Improve parse_web_log_line() for better handling of \n, \r, double quotes etc. * Fix 'Invalid TIME error when timezone sign is negative * Add more logs to compression unit test case * Misc read_last_line() improvements * Fix failing test_auto_detect_web_log_parser_config() when test case terminated without '\n' * Remove unused preprocessor macro * Factor out setup of parse_config_expected_num_fields * Add test for count_fields() * Add unit test for read_last_line() * Fix a read_last_line() bug * Remove PLUGIN[logsmanagement] static thread and update charts synchronously, right before data buffering * Fix web log parser potential SIGSEGV * Fix web log metrics bug where they could show delayed by 1 collection interval * WIP: Add multiline support to kmsg logs and fix metric timings * Fix kmsg subsystem and device parsing and metrics * Add option 'use log timestamp' to select between log timestamps or collection timestamps * Add 'Getting Started' docs section * Move logs management functions code to separate source files * Add 'Nginx access.log' chart description * Remove logsmanagement.plugin source files * Fix some memory leaks * Improve cleanup of logsmanagement_main() * Fix a potential memory leak of fwd_input_out_cb * Better termination and cleanup of main_loop and its handles * Fix main_db_dir access() check bug * Avoid uv_walk() SIGSEGV * Remove main_db_dir access() check * Better termination and cleanup of DB code * Remove flb_socket_config_destroy() that could cause a segmentation fault * Disable unique client IPs - all-time chart by default * Update README.md * Fix debug() -> netdata_log_debug() * Fix read_last_line() * Fix timestamp sign adjustment and wrong unit tests * Change WEB_CLIENT_ACL_DASHBOARD to WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC * Do not parse web log timestamps if 'use_log_timestamp = no' * Add Logs Management back into buildinfo.c * Update README.md * Do not build Fluent Bit executable binary * Change logs rate chart to RRDSET_TYPE_LINE * Add kludge to prevent metrics breaking due to out of order logs * Fix wrong flb_tmp_buff_cpy_timer expiration * Refactor initialization of input plugin for local log sources. * Rename FLB_GENERIC collector to FLB_TAIL. 
* Switch 'Netdata fluentbit.log' to disabled by default * Add 'use inotify' configuration option * Update in README.md * Add docker event actions metrics * Update README.md to include event action chart * Remove commented out PLUGIN[logsmanagement] code block * Fix some warnings * Add documentation for outgoing log streaming and exporting * Fix some code block formatting in README.md * Refactor code related to error status of log query results and add new invalid timestamp case * Reduce query mem allocs and fix end timestamp == 0 bug * Add support for duplicate timestamps in db_search() * Add support for duplicate timestamps in circ_buff_search() * Fix docker events contexts * Various query fixes prior to reverse order search. - Add reverse qsort() function in circ buffers. - Fix issues to properly support of duplicate timestamps. - Separate requested from actual timestamps in query parameters. - Rename results buffer variable name to be consistent between DB and buffers. - Remove default start and end timestamp from functions. - Improve handling of invalid quotas provided by users. - Rename 'until' timestamp name to 'to'. - Increase default quota to 10MB from 1MB. - Allow start timestamp to be > than end timestamp. * Complete descending timestamp search for circular buffers * Complete descending timestamp search for DB * Remove MEASURE_QUERY_TIME code block * Complete descending timestamp search when data resides in both DB and circular buffers * Use pointer instead of copying res_hdr in query results * Refactor web log timezone parsing to use static memory allocation * Add stats for CPU user & system time per MiB of query results * Micro-optimization to slightly speed up queries * More micro-optimizations and some code cleanup * Remove LOGS_QUERY_DATA_FORMAT_NEW_LINE option * Escape iscntrl() chars at collection rather at query * Reduce number of buffer_strcat() calls * Complete descending timestamp order queries for web_api_v1 * Complete descending timestamp order queries for functions * Fix functions query timings to match web_api_v1 ones * Add MQTT message collector Squashed commit of the following: commit dbe515372ee04880b1841ef7800abe9385b12e1c Author: Dim-P <dimitris1703@gmail.com> Date: Mon Aug 21 15:18:46 2023 +0100 Update README.md with MQTT information commit c0b5dbcb7cdef8c6fbd5e72e7bdd08957a0fd3de Author: Dim-P <dimitris1703@gmail.com> Date: Mon Aug 21 14:59:36 2023 +0100 Tidy up before merge commit 9a69c4f17eac858532918a8f850a770b12710f80 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Aug 21 12:54:33 2023 +0100 Fix issue with duplicate Log_Source_Path in DB, introduced in commit e417af3 commit 48213e9713216d62fca8a5bc1bbc41a3883fdc14 Author: Dim-P <dimitris1703@gmail.com> Date: Sat Aug 19 05:05:36 2023 +0100 WIP commit e417af3b947f11bd61e3255306bc95953863998d Author: Dim-P <dimitris1703@gmail.com> Date: Thu Aug 17 18:03:39 2023 +0100 Update functions logsmanagement help output * Inhibit Fluent Bit build warnings * Fix missing allow_subpaths value in api_commands_v1[]. 
* Fix missing HTTP_RESP_BACKEND_FETCH_FAILED error * Fix an enum print warning * Remove systemd-devel requirement from README and fix codacy warnings * Update Alpine versions for musl-fts-dev * Update Fluent Bit to v2.1.8 Squashed commit of the following: commit faf6fc4b7919cc2611124acc67cb1973ce705530 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Aug 25 17:13:30 2023 +0100 Fix wrong default CORE_STACK_SIZE on Alpine commit a810238fe7830ce626f6d57245d68035b29723f7 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Aug 25 00:40:02 2023 +0100 Update Fluent Bit patches for musl commit 8bed3b611dba94a053e22c2b4aa1d46f7787d9b4 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Aug 24 21:54:38 2023 +0100 Fix an edge case crash when web log method is '-' commit b29b48ea230363142697f9749508cd926e18ee19 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Aug 24 16:26:13 2023 +0100 Disable FLB_OUT_CALYPTIA to fix Alpine dlsym() error commit eabe0d0523ffe98ff881675c21b0763a49c05f16 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Aug 22 21:25:54 2023 +0100 Add 'use inotify = no' troubleshooting Q&A in README commit 7f7ae85bdb0def63b4fc05ab88f6572db948e0e7 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Aug 22 18:06:36 2023 +0100 Update README.md links to latest version commit 610c5ac7b920d4a1dfe364ad48f1ca14a0acc346 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Aug 22 16:23:30 2023 +0100 Update flb_parser_create() definition commit f99608ff524b6f3462264e626a1073f9c2fdfdf5 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Aug 22 16:23:04 2023 +0100 Add new config.cmake options commit 446b0d564626055a0a125f525d0bd3754184b830 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Aug 22 12:21:25 2023 +0100 Update Fluent Bit submodule to v2.1.8 * Add logs_management_unittest() to CI 'unittest' * Remove obsolete query testing files * Patch Fluent Bit log format to match netdata's format * Update README with instructions on how to monitor Podman events logs * Fix core dump in case of flb_lib_path dlopen() * Fix some potential compiler warnings * Fix queries crash if logs manag engine not running * Much faster termination of LOGS MANAGEMENT * Add facets support and other minor fixes. logsmanagement_function_execute_cb() is replaced by logsmanagement_function_facets() which adds facets support to logs management queries. Internal query results header now includes additional fields (log_source, log_type, basename, filename, chartname), that are used as facets. Queries now support timeout as a query parameter. A web log timestamp bug is fixed (by using timegm() instead of mktime(). web_api_v1 logsmanagement API is only available in debugging now. Squashed commit of the following: commit 32cf0381283029d793ec3af30d96e6cd77ee9149 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 19 16:21:32 2023 +0300 Tidy up commit f956b5846451c6b955a150b5d071947037e935f0 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 19 13:30:54 2023 +0300 Add more accepted params. Add data_only option. Add if_modified_since option. 
commit 588c2425c60dcdd14349b7b346467dba32fda4e9 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Sep 18 18:39:50 2023 +0300 Add timeout to queries commit da0f055fc47a36d9af4b7cc4cefb8eb6630e36d9 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 14 19:17:16 2023 +0300 Fix histogram commit 7149890974e0d26420ec1c5cfe1023801dc973fa Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 14 17:58:52 2023 +0300 Add keyword query using simple patterns and fix descending timestamp values commit 0bd068c5a76e694b876027e9fa5af6f333ab825b Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 14 13:54:05 2023 +0300 Add basename, filename, chartname as facets commit 023c2b5f758b2479a0e48da575cd59500a1373b6 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 14 13:26:06 2023 +0300 Add info and sources functions options commit ab4d555b7d445f7291af474847bd9177d3726a76 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 14 12:54:37 2023 +0300 Fix facet id filter commit a69c9e2732f5a6da1764bb57d1c06d8d65979225 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 14 12:07:13 2023 +0300 WIP: Add facet id filters commit 3c02b5de81fa8a20c712863c347539a52936ddd8 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 12 18:19:17 2023 +0300 Add log source and log type to circ buff query results header commit 8ca98672c4911c126e50f3cbdd69ac363abdb33d Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 12 18:18:13 2023 +0300 Fix logsmanagement facet function after master rebasing commit 3f1517ad56cda2473a279a8d130bec869fc2cbb8 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 12 18:14:25 2023 +0300 Restrict /logsmanagement to ACL_DEV_OPEN_ACCESS only commit 8ca98d69b08d006c682997268d5d2523ddde6be0 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 12 14:40:22 2023 +0300 Fix incorrectly parsed timestamps due to DST commit f9b0848037b29c7fcc46da951ca5cd9eb129066f Author: Dim-P <dimitris1703@gmail.com> Date: Mon Sep 11 13:42:18 2023 +0300 Add logs_management_meta object to facet query results commit babc978f6c97107aaf8b337d8d31735d61761b6a Author: Dim-P <dimitris1703@gmail.com> Date: Mon Sep 11 13:03:52 2023 +0300 Query all sources if no arguments provided commit 486d56de87af56aae6c0dc5d165341418222ce8b Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 7 18:38:04 2023 +0300 Add log_source and log_type (only for DB logs) as facets. Add relative time support commit b564c12843d355c4da6436af358d5f352cb58bfe Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 7 13:47:20 2023 +0300 Working facet with descending timestamps commit 68c6a5c64e8425cf28ec16adfb0c50289caa82a9 Author: Dim-P <dimitris1703@gmail.com> Date: Wed Sep 6 01:55:51 2023 +0300 WIP * Fix linking errors * Convert logs management to external plugin. 
Squashed commit of the following: commit 16da6ba70ebde0859aed734087f04af497ce3a77 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 18:44:12 2023 +0100 Use higher value of update every from netdata.conf or logsmanagement.d.conf commit 88cc3497c403e07686e9fc0876ebb0c610a1404c Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 18:43:02 2023 +0100 Tidy up commit c3fca57aac169842637d210269519612b1a91e28 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 18:02:04 2023 +0100 Use external update_every from agent, if available commit f7470708ba82495b03297cdf8962a09b16617ddd Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 17:40:46 2023 +0100 Re-enable debug logs commit b34f5ac6a2228361ab41df7d7e5e713f724368c0 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 15:49:20 2023 +0100 Remove old API calls from web_api_v1.c/h commit 7fbc1e699a7785ec837233b9562199ee6c7684da Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 15:32:04 2023 +0100 Add proper termination of stats charts thread commit 4c0fc05c8b14593bd7a0aa68f75a8a1205e04db4 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 24 15:31:36 2023 +0100 Add tests for logsmanag_config functions commit 4dfdacb55707ab46ed6c2d5ce538ac012574b27e Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 23 22:01:19 2023 +0100 Remove unused headers from logsmanagement.c commit b324ef396207c5c32e40ea9ad462bf374470b230 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 23 21:56:26 2023 +0100 Remove inline from get_X_dir() functions commit e9656e8121b66cd7ef8b5daaa5d27a134427aa35 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 23 21:50:32 2023 +0100 Proper termination when a signal is received commit b09eec147bdeffae7b268b6335f6ba89f084e050 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 23 20:12:13 2023 +0100 Refactor logs management config code in separate source files commit 014b46a5008fd296f7d25854079c518d018abdec Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 23 14:54:47 2023 +0100 Fix p_file_info_destroy() crash commit e0bdfd182513bb8d5d4b4b5b8a4cc248ccf2d64e Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 23 14:18:27 2023 +0100 Code refactoring and cleanup commit 6a61cb6e2fd3a535db150b01d9450f44b3e27b30 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Oct 20 14:08:43 2023 +0100 Fix 'source:all' queries commit 45b516aaf819ac142353e323209b7d01e487393f Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 19 21:51:05 2023 +0100 Working 'source:...' 
queries and regular data queries (but not 'source:all') commit 8064b0ee71c63da9803f79424802f860e96326e5 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 19 15:34:23 2023 +0100 Fix issue due to p_file_info_destroy() commit a0aacc9cd00cea60218c9bfd2b9f164918a1e3de Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 17 22:06:34 2023 +0100 Work on facet API changes commit 480584ff9040c07e996b14efb4d21970a347633f Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 16 21:43:06 2023 +0100 Add stats charts, running as separate thread commit 34d582dbe4bf2d8d048afab41681e337705bc611 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 16 16:24:02 2023 +0100 Add SSL cipher charts commit ced27ee4e2c981d291f498244f2eef2556a074fb Author: Dim-P <dimitris1703@gmail.com> Date: Sun Oct 15 21:33:29 2023 +0100 Add Response code family, Response code, Response code type, SSL protocol charts commit 40c4a1d91892d49b1e4e18a1c3c43258ded4014d Author: Dim-P <dimitris1703@gmail.com> Date: Sat Oct 14 00:48:48 2023 +0100 Add more web log charts commit 890ed3ff97153dd18d15df2d1b57a181bc498ca8 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Oct 13 22:14:11 2023 +0100 Add web log vhosts and ports charts commit 84733b6b1d353aff70687603019443610a8500c3 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 12 21:40:16 2023 +0100 Add systemd charts commit 14673501e8f48560956f53d5b670bbe801b8f2ae Author: Dim-P <dimitris1703@gmail.com> Date: Wed Oct 11 00:28:43 2023 +0100 Add MQTT charts commit 366eb63b0a27dde6f0f8ba65120f34c18c1b21fd Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 10 21:46:19 2023 +0100 Complete kmsg changes. Reduce mem usage. Fix a dictionary key size bug commit 3d0216365a526ffbc9ce13a20c45447bfccb47d9 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 10 19:18:41 2023 +0100 Add kmsg Subsystem charts commit e61af4bb130a5cf5a5a78133f1e44b2b4c457b24 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 10 16:21:29 2023 +0100 Fix bug of wrong kmsg timestamps in case of use_log_timestamp == 0 commit 03d22e0b26bddf249aab431a4f977bbd5cde98ca Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 10 16:20:47 2023 +0100 Add kmsg charts, except for Subsystem and Device commit f60b0787537a21ed3c4cea5101fcddc50f3bc55a Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 10 13:12:13 2023 +0100 Initialise all docker events chart dimensions at startup commit 5d873d3439abaf3768530cb5b72c6b4ef6565353 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 10 00:53:35 2023 +0100 WIP: Add Docker events logs commit 2cc3d6d98f58fc3ab67a8da3014210b14d0926a1 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 9 18:52:27 2023 +0100 Use macros for num_of_logs_charts and custom_charts functions commit fbd48ad3c9af674601238990d74192427475f2e3 Author: Dim-P <dimitris1703@gmail.com> Date: Mon Oct 9 18:26:17 2023 +0100 Refactor custom charts code for clarity and speed commit a31d80b5dc91161c0d74b10d00bc4fd1e6da7965 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 5 23:58:27 2023 +0100 Add first working iteration of custom charts commit b1e4ab8a460f4b4c3e2804e2f775787d21fbee45 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 5 23:57:27 2023 +0100 Add more custom charts for Netdata error.log commit f1b7605e564da3e297942f073593cdd4c21f88e1 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 5 20:39:40 2023 +0100 Convert collected_logs_* chart updates to macros commit 1459bc2b8bcd5ba21e024b10a8a5101048938f71 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Oct 5 19:11:54 2023 +0100 Use rrdset_timed_done() instead of 
duration_since_last_update for correct chart timings commit 876854c6ee7586a3eb9fdbf795bcc17a5fd1e6ad Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 3 21:53:14 2023 +0100 Fix some bugs in chart updates commit ae87508485499984bcb9b72bbc7d249c4168b380 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Oct 3 21:32:55 2023 +0100 Functioning generic_chart_init() and generic_chart_update() commit 982a9c4108dbea9571c785b5ff8a9d1e5472066c Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 28 23:32:52 2023 +0100 Add support for multiple .conf files. Add stock examples. commit 8e8abd0731227eb3fb3c6bcd811349575160799e Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 28 17:38:30 2023 +0100 Add support for logsmanagement.d/default.conf commit 1bf0732217b1d9e9959e1507ea96fc2c92ffb2ff Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 28 14:31:03 2023 +0100 Add capabilities. Fix paths in logsmanagement.d.conf commit a849d5b405bb4e5d770726fe99413a4efa7df274 Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 26 23:06:31 2023 +0100 Change logs_manag_config_load() commit b0d1783b996286cd87e0832bfb74c29a845d61fc Author: Dim-P <dimitris1703@gmail.com> Date: Tue Sep 26 15:35:30 2023 +0100 Working unit tests and argument parsing commit 6da1b4267a4d58d3a7cbcca9507afe8158a2e324 Author: Dim-P <dimitris1703@gmail.com> Date: Fri Sep 22 00:32:47 2023 +0300 Build logs-management.plugin successfully commit 9e30efe0422e4941f99cc66998d9f42e00a24676 Author: Dim-P <dimitris1703@gmail.com> Date: Thu Sep 21 16:13:21 2023 +0300 Fix print format specifier in web_client_api_request_v1_logsmanagement() * Modify CODEOWNERS * Update README.md Fix indentation * Change snprintfz() to stncpyz() in circ_buff_search(). Change remaining 'chart_name' to 'chartname'. * Replace SQLite version function with macro * Fix some codacy warnings * Update README.md * Update Fluent Bit to v2.1.10 * Remove some comments * Fix Fluent Bit shared library linking for armv7l and FreeBSD * Remove compression source files * Add prefix to rrd_api.h functions * Add more unit tests * Fix kmsg capabilities * Separate kmsg and systemd default paths * Fix some memory leaks and better termination of DB * Add iterative queries if quota is exceeded * Fix centos7 builds * Fix issue where SYSTEMD timestamps are not parsed * Fix logs management packaging. * Fix typo in DEB control file. * Fix indentation and missing new line at EOF * Clean up functions and update help * Fix 400 error when no queryable sources are available * Fix if_modified_since. Add FACET_MAX_VALUE_LENGTH * Add delta parameter and use anchor points in queries * Fix CodeQL #182 warning * Fix packaging issues. * Fix postinstall script for DEB packages. * Improve plugin shutdown speed * Fix docker events chart grouping * Fix functions evloop threads not terminating upon shutdown * Fix coverity issues * Fix logging * Replace 'Netdata error.log' with 'Netdata daemon.log' in 'default.conf' * Remove 'enabled = yes/no' config in logsmanagement.d.conf * Remove 'enabled = X' unused config from logsmanagement.d.conf --------- Co-authored-by: Austin S. Hemmelgarn <austin@netdata.cloud>
// SPDX-License-Identifier: GPL-3.0-or-later

/** @file db_api.c
 *  @brief This is the file implementing the API to the
 *         logs management database.
 */

#include "daemon/common.h"
#include "db_api.h"
#include <inttypes.h>
#include <stdio.h>
#include "circular_buffer.h"
#include "helper.h"
#include "lz4.h"
#include "parser.h"

#define MAIN_DB "main.db"                       /**< Primary DB with metadata for all the logs management collections **/
#define MAIN_COLLECTIONS_TABLE "LogCollections" /**< Table name where logs collections metadata is stored in MAIN_DB **/
#define BLOB_STORE_FILENAME "logs.bin."         /**< Filename prefix of the BLOBs where logs are stored **/
#define METADATA_DB_FILENAME "metadata.db"      /**< Metadata DB for each log collection **/
#define LOGS_TABLE "Logs"                       /**< Table name where logs metadata is stored in METADATA_DB_FILENAME **/
#define BLOBS_TABLE "Blobs"                     /**< Table name where BLOBs metadata is stored in METADATA_DB_FILENAME **/

#define LOGS_MANAG_DB_VERSION 1

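/*
 * On-disk layout (illustrative; the per-collection directory is whatever DB_Dir
 * is set to for that collection, typically a sub-directory of main_db_dir):
 *
 *   <main_db_dir>/main.db                  MAIN_COLLECTIONS_TABLE, one row per log collection
 *   <per-collection DB_Dir>/metadata.db    LOGS_TABLE and BLOBS_TABLE metadata
 *   <per-collection DB_Dir>/logs.bin.N     compressed log text, N rotating over the BLOB files
 */
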
static sqlite3 *main_db = NULL;   /**< SQLite DB handler for MAIN_DB **/
static char *main_db_dir = NULL;  /**< Directory where all the log management databases and log blobs are stored in **/
static char *main_db_path = NULL; /**< Path of MAIN_DB **/

/* -------------------------------------------------------------------------- */
/* Database migrations */
/* -------------------------------------------------------------------------- */

/**
 * @brief No-op database migration, just to bump up starting version.
 * @param database Unused
 * @param name Unused
 * @return Always 0.
 */
static int do_migration_noop(sqlite3 *database, const char *name){
    UNUSED(database);
    UNUSED(name);
    collector_info("Running database migration %s", name);
    return 0;
}

typedef struct database_func_migration_list{
    char *name;
    int (*func)(sqlite3 *database, const char *name);
} DATABASE_FUNC_MIGRATION_LIST;

DATABASE_FUNC_MIGRATION_LIST migration_list_main_db[] = {
    {.name = MAIN_DB" v0 to v1", .func = do_migration_noop},
    // the terminator of this array
    {.name = NULL, .func = NULL}
};

DATABASE_FUNC_MIGRATION_LIST migration_list_metadata_db[] = {
    {.name = METADATA_DB_FILENAME " v0 to v1", .func = do_migration_noop},
    // the terminator of this array
    {.name = NULL, .func = NULL}
};

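/* To introduce a future schema version, a migration function would be appended
 * before the NULL terminator of the relevant list and LOGS_MANAG_DB_VERSION
 * would be bumped, e.g. (do_migration_v1_v2 is a hypothetical function with the
 * same signature as do_migration_noop()):
 *
 *     {.name = MAIN_DB" v1 to v2", .func = do_migration_v1_v2},
 *
 * The migration loop in db_init() then runs every entry from the database's
 * current user_version up to LOGS_MANAG_DB_VERSION.
 */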

typedef enum {
    ERR_TYPE_OTHER,
    ERR_TYPE_SQLITE,
    ERR_TYPE_LIBUV,
} logs_manag_db_error_t;

/**
 * @brief Logs a database error
 * @param[in] log_source Log source that caused the error
 * @param[in] error_type Type of error
 * @param[in] rc Error code
 * @param[in] line Line number where the error occurred (__LINE__)
 * @param[in] file Source file where the error occurred (__FILE__)
 * @param[in] func Function where the error occurred (__FUNCTION__)
 */
static void throw_error(const char *const log_source,
                        const logs_manag_db_error_t error_type,
                        const int rc, const int line,
                        const char *const file, const char *const func){
    collector_error("[%s]: %s database error: (%d) %s (%s:%s:%d)",
                    log_source ? log_source : "-",
                    error_type == ERR_TYPE_OTHER ? "" : error_type == ERR_TYPE_SQLITE ? "SQLite" : "libuv",
                    rc, error_type == ERR_TYPE_OTHER ? "" : error_type == ERR_TYPE_SQLITE ? sqlite3_errstr(rc) : uv_strerror(rc),
                    file, func, line);
}

/**
 * @brief Get or set user_version of database.
 * @param db SQLite database to act upon.
 * @param set_user_version If <= 0, just get user_version. Otherwise, set
 *                         user_version first, before returning it.
 * @return Database user_version or -1 in case of error.
 */
int db_user_version(sqlite3 *const db, const int set_user_version){
    if(unlikely(!db)) return -1;
    int rc = 0;
    if(set_user_version <= 0){
        sqlite3_stmt *stmt_get_user_version;
        rc = sqlite3_prepare_v2(db, "PRAGMA user_version;", -1, &stmt_get_user_version, NULL);
        if (unlikely(SQLITE_OK != rc)) {
            throw_error(NULL, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
            return -1;
        }
        rc = sqlite3_step(stmt_get_user_version);
        if (unlikely(SQLITE_ROW != rc)) {
            throw_error(NULL, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
            return -1;
        }
        int current_user_version = sqlite3_column_int(stmt_get_user_version, 0);
        rc = sqlite3_finalize(stmt_get_user_version);
        if (unlikely(SQLITE_OK != rc)) {
            throw_error(NULL, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
            return -1;
        }
        return current_user_version;
    } else {
        char buf[25];
        snprintfz(buf, 25, "PRAGMA user_version=%d;", set_user_version);
        rc = sqlite3_exec(db, buf, NULL, NULL, NULL);
        if (unlikely(SQLITE_OK != rc)) {
            throw_error(NULL, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
            return -1;
        }
        return set_user_version;
    }
}

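/* Illustrative use of db_user_version() (db_init() below uses the same calls
 * when running migrations):
 *
 *     int ver = db_user_version(db, -1);                      // get only
 *     if(ver >= 0 && ver < LOGS_MANAG_DB_VERSION)
 *         ver = db_user_version(db, LOGS_MANAG_DB_VERSION);   // set, then return it
 */

/**
 * @brief DB writer thread function for log sources with DB mode "none".
 *
 * Drains the circular buffer of the log source without persisting anything to
 * disk, then sleeps in 250ms steps for roughly buff_flush_to_db_interval
 * seconds, for as long as the log source remains in the LOG_SRC_READY state.
 */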
static void db_writer_db_mode_none(void *arg){
    struct File_info *const p_file_info = (struct File_info *) arg;
    Circ_buff_item_t *item;

    while(__atomic_load_n(&p_file_info->state, __ATOMIC_RELAXED) == LOG_SRC_READY){
        uv_rwlock_rdlock(&p_file_info->circ_buff->buff_realloc_rwlock);
        do{ item = circ_buff_read_item(p_file_info->circ_buff); } while(item);
        circ_buff_read_done(p_file_info->circ_buff);
        uv_rwlock_rdunlock(&p_file_info->circ_buff->buff_realloc_rwlock);
        for(int i = 0; i < p_file_info->buff_flush_to_db_interval * 4; i++){
            if(__atomic_load_n(&p_file_info->state, __ATOMIC_RELAXED) != LOG_SRC_READY)
                break;
            sleep_usec(250 * USEC_PER_MS);
        }
    }
}

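/**
 * @brief Clean up a failed DB writer and fall back to DB mode "none".
 *
 * Helper macro used by db_writer_db_mode_full(): it finalizes the prepared
 * SQLite statements, clears the collection's DB paths, optionally releases the
 * held mutex and rwlock (do_mut_unlock) and, if the log source is still
 * LOG_SRC_READY, respawns the writer thread as db_writer_db_mode_none(), so
 * that collection keeps running without persistence.
 */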
#define return_db_writer_db_mode_none(p_file_info, do_mut_unlock){ \
    p_file_info->db_mode = LOGS_MANAG_DB_MODE_NONE; \
    freez((void *) p_file_info->db_dir); \
    p_file_info->db_dir = strdupz(""); \
    freez((void *) p_file_info->db_metadata); \
    p_file_info->db_metadata = NULL; \
    sqlite3_finalize(stmt_logs_insert); \
    sqlite3_finalize(stmt_blobs_get_total_filesize); \
    sqlite3_finalize(stmt_blobs_update); \
    sqlite3_finalize(stmt_blobs_set_zero_filesize); \
    sqlite3_finalize(stmt_logs_delete); \
    if(do_mut_unlock){ \
        uv_mutex_unlock(p_file_info->db_mut); \
        uv_rwlock_rdunlock(&p_file_info->circ_buff->buff_realloc_rwlock); \
    } \
    if(__atomic_load_n(&p_file_info->state, __ATOMIC_RELAXED) == LOG_SRC_READY) \
        return fatal_assert(!uv_thread_create( p_file_info->db_writer_thread, \
                                               db_writer_db_mode_none, \
                                               p_file_info)); \
}

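/**
 * @brief DB writer thread function for log sources with full DB mode.
 *
 * Prepares the SQLite statements it needs and then, while the log source is in
 * the LOG_SRC_READY state, repeatedly:
 *   1. reads compressed items from the circular buffer, appends them to the
 *      current write-to BLOB and records their metadata in LOGS_TABLE and
 *      BLOBS_TABLE within a single transaction,
 *   2. rotates the BLOB files once the write-to BLOB exceeds blob_max_size,
 *   3. updates the write/rotate timings and total BLOB disk usage statistics,
 *   4. sleeps for roughly buff_flush_to_db_interval seconds.
 *
 * On any unrecoverable error it falls back to DB mode "none" through
 * return_db_writer_db_mode_none().
 */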
static void db_writer_db_mode_full(void *arg){
    int rc = 0;
    struct File_info *const p_file_info = (struct File_info *) arg;

    sqlite3_stmt *stmt_logs_insert = NULL;
    sqlite3_stmt *stmt_blobs_get_total_filesize = NULL;
    sqlite3_stmt *stmt_blobs_update = NULL;
    sqlite3_stmt *stmt_blobs_set_zero_filesize = NULL;
    sqlite3_stmt *stmt_logs_delete = NULL;

    /* Prepare LOGS_TABLE INSERT statement */
    rc = sqlite3_prepare_v2(p_file_info->db,
                            "INSERT INTO " LOGS_TABLE "("
                            "FK_BLOB_Id,"
                            "BLOB_Offset,"
                            "Timestamp,"
                            "Msg_compr_size,"
                            "Msg_decompr_size,"
                            "Num_lines"
                            ") VALUES (?,?,?,?,?,?) ;",
                            -1, &stmt_logs_insert, NULL);
    if (unlikely(SQLITE_OK != rc)) {
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }

    /* Prepare BLOBS_TABLE get total filesize statement */
    rc = sqlite3_prepare_v2(p_file_info->db,
                            "SELECT SUM(Filesize) FROM " BLOBS_TABLE " ;",
                            -1, &stmt_blobs_get_total_filesize, NULL);
    if (unlikely(SQLITE_OK != rc)) {
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }

    /* Prepare BLOBS_TABLE UPDATE statement */
    rc = sqlite3_prepare_v2(p_file_info->db,
                            "UPDATE " BLOBS_TABLE
                            " SET Filesize = Filesize + ?"
                            " WHERE Id = ? ;",
                            -1, &stmt_blobs_update, NULL);
    if (unlikely(SQLITE_OK != rc)) {
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }

    /* Prepare BLOBS_TABLE UPDATE SET zero filesize statement */
    rc = sqlite3_prepare_v2(p_file_info->db,
                            "UPDATE " BLOBS_TABLE
                            " SET Filesize = 0"
                            " WHERE Id = ? ;",
                            -1, &stmt_blobs_set_zero_filesize, NULL);
    if (unlikely(SQLITE_OK != rc)) {
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }

    /* Prepare LOGS_TABLE DELETE statement */
    rc = sqlite3_prepare_v2(p_file_info->db,
                            "DELETE FROM " LOGS_TABLE
                            " WHERE FK_BLOB_Id = ? ;",
                            -1, &stmt_logs_delete, NULL);
    if (unlikely(SQLITE_OK != rc)) {
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }

    /* Get initial filesize of logs.bin.0 BLOB */
    sqlite3_stmt *stmt_retrieve_filesize_from_id = NULL;
    if(unlikely(
        SQLITE_OK != (rc = sqlite3_prepare_v2(p_file_info->db,
                                              "SELECT Filesize FROM " BLOBS_TABLE
                                              " WHERE Id = ? ;",
                                              -1, &stmt_retrieve_filesize_from_id, NULL)) ||
        SQLITE_OK != (rc = sqlite3_bind_int(stmt_retrieve_filesize_from_id, 1,
                                            p_file_info->blob_write_handle_offset)) ||
        SQLITE_ROW != (rc = sqlite3_step(stmt_retrieve_filesize_from_id))
    )){
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }
    int64_t blob_filesize = (int64_t) sqlite3_column_int64(stmt_retrieve_filesize_from_id, 0);
    rc = sqlite3_finalize(stmt_retrieve_filesize_from_id);
    if (unlikely(SQLITE_OK != rc)) {
        throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
        return_db_writer_db_mode_none(p_file_info, 0);
    }

    struct timespec ts_db_write_start, ts_db_write_end, ts_db_rotate_end;
    while(__atomic_load_n(&p_file_info->state, __ATOMIC_RELAXED) == LOG_SRC_READY){
        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts_db_write_start);

        uv_rwlock_rdlock(&p_file_info->circ_buff->buff_realloc_rwlock);
        uv_mutex_lock(p_file_info->db_mut);

        /* ---------------------------------------------------------------------
         * Read items from circular buffer and store them in disk BLOBs.
         * After that, SQLite metadata is updated.
         * ------------------------------------------------------------------ */
        Circ_buff_item_t *item = circ_buff_read_item(p_file_info->circ_buff);
        while (item) {
            m_assert(TEST_MS_TIMESTAMP_VALID(item->timestamp), "item->timestamp == 0");
            m_assert(item->text_compressed_size != 0, "item->text_compressed_size == 0");
            m_assert(item->text_size != 0, "item->text_size == 0");

            /* Write logs in BLOB */
            uv_fs_t write_req;
            uv_buf_t uv_buf = uv_buf_init((char *) item->text_compressed, (unsigned int) item->text_compressed_size);
            rc = uv_fs_write( NULL, &write_req,
                              p_file_info->blob_handles[p_file_info->blob_write_handle_offset],
                              &uv_buf, 1, blob_filesize, NULL); // Write synchronously at the end of the BLOB file
            uv_fs_req_cleanup(&write_req);
            if(unlikely(rc < 0)){
                throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
                circ_buff_read_done(p_file_info->circ_buff);
                return_db_writer_db_mode_none(p_file_info, 1);
            }

            /* Ensure data is flushed to BLOB via fdatasync() */
            uv_fs_t dsync_req;
            rc = uv_fs_fdatasync( NULL, &dsync_req,
                                  p_file_info->blob_handles[p_file_info->blob_write_handle_offset], NULL);
            uv_fs_req_cleanup(&dsync_req);
            if (unlikely(rc)){
                throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
                circ_buff_read_done(p_file_info->circ_buff);
                return_db_writer_db_mode_none(p_file_info, 1);
            }

            if(unlikely(
                /* Write metadata of logs in LOGS_TABLE */
                SQLITE_OK != (rc = sqlite3_exec(p_file_info->db, "BEGIN TRANSACTION;", NULL, NULL, NULL)) ||
                SQLITE_OK != (rc = sqlite3_bind_int(stmt_logs_insert, 1, p_file_info->blob_write_handle_offset)) ||
                SQLITE_OK != (rc = sqlite3_bind_int64(stmt_logs_insert, 2, (sqlite3_int64) blob_filesize)) ||
                SQLITE_OK != (rc = sqlite3_bind_int64(stmt_logs_insert, 3, (sqlite3_int64) item->timestamp)) ||
                SQLITE_OK != (rc = sqlite3_bind_int64(stmt_logs_insert, 4, (sqlite3_int64) item->text_compressed_size)) ||
                SQLITE_OK != (rc = sqlite3_bind_int64(stmt_logs_insert, 5, (sqlite3_int64) item->text_size)) ||
                SQLITE_OK != (rc = sqlite3_bind_int64(stmt_logs_insert, 6, (sqlite3_int64) item->num_lines)) ||
                SQLITE_DONE != (rc = sqlite3_step(stmt_logs_insert)) ||
                SQLITE_OK != (rc = sqlite3_reset(stmt_logs_insert)) ||

                /* Update metadata of BLOBs filesize in BLOBS_TABLE */
                SQLITE_OK != (rc = sqlite3_bind_int64(stmt_blobs_update, 1, (sqlite3_int64) item->text_compressed_size)) ||
                SQLITE_OK != (rc = sqlite3_bind_int(stmt_blobs_update, 2, p_file_info->blob_write_handle_offset)) ||
                SQLITE_DONE != (rc = sqlite3_step(stmt_blobs_update)) ||
                SQLITE_OK != (rc = sqlite3_reset(stmt_blobs_update)) ||
                SQLITE_OK != (rc = sqlite3_exec(p_file_info->db, "END TRANSACTION;", NULL, NULL, NULL))
            )) {
                throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
                rc = sqlite3_exec(p_file_info->db, "ROLLBACK;", NULL, NULL, NULL);
                if (unlikely(SQLITE_OK != rc))
                    throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
                circ_buff_read_done(p_file_info->circ_buff);
                return_db_writer_db_mode_none(p_file_info, 1);
            }

            /* TODO: Should we log it if there is a fatal error in the transaction,
             * as there will be a mismatch between BLOBs and SQLite metadata? */

            /* Increase BLOB offset and read next log message until no more messages in buff */
            blob_filesize += (int64_t) item->text_compressed_size;
            item = circ_buff_read_item(p_file_info->circ_buff);
        }
        circ_buff_read_done(p_file_info->circ_buff);

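        /* Illustrative read-back sketch (not used in this translation unit):
         * a record written above can be recovered by reading Msg_compr_size
         * bytes at BLOB_Offset from the corresponding BLOB file and
         * LZ4-decompressing them into a Msg_decompr_size buffer. All variable
         * names below are hypothetical.
         *
         *     uv_fs_t read_req;
         *     char *compressed = mallocz(msg_compr_size);
         *     char *text = mallocz(msg_decompr_size);
         *     uv_buf_t buf = uv_buf_init(compressed, (unsigned int) msg_compr_size);
         *     int ret = uv_fs_read(NULL, &read_req, blob_handle, &buf, 1, (int64_t) blob_offset, NULL);
         *     uv_fs_req_cleanup(&read_req);
         *     if(ret >= 0 && LZ4_decompress_safe(compressed, text,
         *                                        (int) msg_compr_size,
         *                                        (int) msg_decompr_size) > 0){
         *         // 'text' now holds the original (uncompressed) log text
         *     }
         *     freez(compressed);
         *     freez(text);
         */
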
        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts_db_write_end);

        /* ---------------------------------------------------------------------
         * If the filesize of the current write-to BLOB is >
         * p_file_info->blob_max_size, then perform a BLOBs rotation.
         * ------------------------------------------------------------------ */
        if(blob_filesize > p_file_info->blob_max_size){
            uv_fs_t rename_req;
            char old_path[FILENAME_MAX + 1], new_path[FILENAME_MAX + 1];

            /* Rotate path of BLOBs */
            for(int i = BLOB_MAX_FILES - 1; i >= 0; i--){
                snprintfz(old_path, FILENAME_MAX, "%s" BLOB_STORE_FILENAME "%d", p_file_info->db_dir, i);
                snprintfz(new_path, FILENAME_MAX, "%s" BLOB_STORE_FILENAME "%d", p_file_info->db_dir, i + 1);
                rc = uv_fs_rename(NULL, &rename_req, old_path, new_path, NULL);
                uv_fs_req_cleanup(&rename_req);
                if (unlikely(rc)){
                    //TODO: This error case needs better handling, as it will result in mismatch with sqlite metadata.
                    // We probably require a WAL or something similar.
                    throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
                    return_db_writer_db_mode_none(p_file_info, 1);
                }
            }

            /* Replace the maximum number with 0 in BLOB files. */
            snprintfz(old_path, FILENAME_MAX, "%s" BLOB_STORE_FILENAME "%d", p_file_info->db_dir, BLOB_MAX_FILES);
            snprintfz(new_path, FILENAME_MAX, "%s" BLOB_STORE_FILENAME "%d", p_file_info->db_dir, 0);
            rc = uv_fs_rename(NULL, &rename_req, old_path, new_path, NULL);
            uv_fs_req_cleanup(&rename_req);
            if (unlikely(rc)){
                //TODO: This error case needs better handling, as it will result in mismatch with sqlite metadata.
                // We probably require a WAL or something similar.
                throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
                return_db_writer_db_mode_none(p_file_info, 1);
            }

            /* Rotate BLOBS_TABLE Filenames */
            rc = sqlite3_exec(p_file_info->db,
                              "UPDATE " BLOBS_TABLE
                              " SET Filename = REPLACE( "
                              " Filename, "
                              " substr(Filename, -1), "
                              " case when "
                              " (cast(substr(Filename, -1) AS INTEGER) < (" LOGS_MANAG_STR(BLOB_MAX_FILES) " - 1)) then "
                              " substr(Filename, -1) + 1 else 0 end);",
                              NULL, NULL, NULL);
            if (unlikely(rc != SQLITE_OK)) {
                throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
                //TODO: Undo rotation if possible?
                return_db_writer_db_mode_none(p_file_info, 1);
            }

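            /* Worked example of the rotation above, assuming (hypothetically)
             * that BLOB_MAX_FILES is 4: the renames shift every numeric suffix
             * by one, i.e. logs.bin.2 -> logs.bin.3, logs.bin.1 -> logs.bin.2,
             * logs.bin.0 -> logs.bin.1 and logs.bin.3 -> logs.bin.0 (through
             * the temporary logs.bin.4 name), while the UPDATE above applies
             * the same shift to the Filename column of BLOBS_TABLE, keeping
             * file names and metadata in sync. The BLOB selected as the new
             * write target in step (a) below is then truncated and its rows
             * deleted from LOGS_TABLE, discarding the oldest stored logs. */
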
            /* -----------------------------------------------------------------
             * (a) Update blob_write_handle_offset,
             * (b) truncate new write-to BLOB,
             * (c) update filesize of truncated BLOB in SQLite DB,
             * (d) delete respective logs in LOGS_TABLE for the truncated BLOB and
             * (e) reset blob_filesize
             * -------------------------------------------------------------- */
            /* (a) */
            p_file_info->blob_write_handle_offset =
                p_file_info->blob_write_handle_offset == 1 ? BLOB_MAX_FILES : p_file_info->blob_write_handle_offset - 1;

            /* (b) */
            uv_fs_t trunc_req;
            rc = uv_fs_ftruncate(NULL, &trunc_req, p_file_info->blob_handles[p_file_info->blob_write_handle_offset], 0, NULL);
            uv_fs_req_cleanup(&trunc_req);
            if (unlikely(rc)){
                //TODO: This error case needs better handling, as it will result in mismatch with sqlite metadata.
                // We probably require a WAL or something similar.
                throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
                return_db_writer_db_mode_none(p_file_info, 1);
            }

            /* (c) */
            if(unlikely(
                SQLITE_OK != (rc = sqlite3_exec(p_file_info->db, "BEGIN TRANSACTION;", NULL, NULL, NULL)) ||
                SQLITE_OK != (rc = sqlite3_bind_int(stmt_blobs_set_zero_filesize, 1, p_file_info->blob_write_handle_offset)) ||
                SQLITE_DONE != (rc = sqlite3_step(stmt_blobs_set_zero_filesize)) ||
                SQLITE_OK != (rc = sqlite3_reset(stmt_blobs_set_zero_filesize)) ||

                /* (d) */
                SQLITE_OK != (rc = sqlite3_bind_int(stmt_logs_delete, 1, p_file_info->blob_write_handle_offset)) ||
                SQLITE_DONE != (rc = sqlite3_step(stmt_logs_delete)) ||
                SQLITE_OK != (rc = sqlite3_reset(stmt_logs_delete)) ||
                SQLITE_OK != (rc = sqlite3_exec(p_file_info->db, "END TRANSACTION;", NULL, NULL, NULL))
            )) {
                throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
                rc = sqlite3_exec(p_file_info->db, "ROLLBACK;", NULL, NULL, NULL);
                if (unlikely(SQLITE_OK != rc))
                    throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
                return_db_writer_db_mode_none(p_file_info, 1);
            }

            /* (e) */
            blob_filesize = 0;
        }

        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts_db_rotate_end);

        /* Update database write & rotate timings for this log source */
        __atomic_store_n(&p_file_info->db_write_duration,
                         (ts_db_write_end.tv_sec - ts_db_write_start.tv_sec) * NSEC_PER_SEC +
                         (ts_db_write_end.tv_nsec - ts_db_write_start.tv_nsec), __ATOMIC_RELAXED);
        __atomic_store_n(&p_file_info->db_rotate_duration,
                         (ts_db_rotate_end.tv_sec - ts_db_write_end.tv_sec) * NSEC_PER_SEC +
                         (ts_db_rotate_end.tv_nsec - ts_db_write_end.tv_nsec), __ATOMIC_RELAXED);

        /* Update total disk usage of all BLOBs for this log source */
        rc = sqlite3_step(stmt_blobs_get_total_filesize);
        if (unlikely(SQLITE_ROW != rc)) {
            throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
            return_db_writer_db_mode_none(p_file_info, 1);
        }
        __atomic_store_n(&p_file_info->blob_total_size, sqlite3_column_int64(stmt_blobs_get_total_filesize, 0), __ATOMIC_RELAXED);
        rc = sqlite3_reset(stmt_blobs_get_total_filesize);
        if (unlikely(SQLITE_OK != rc)) {
            throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
            return_db_writer_db_mode_none(p_file_info, 1);
        }

        // TODO: Can uv_mutex_unlock(p_file_info->db_mut) be moved before if(blob_filesize > p_file_info->blob_max_size) ?
        uv_mutex_unlock(p_file_info->db_mut);
        uv_rwlock_rdunlock(&p_file_info->circ_buff->buff_realloc_rwlock);
        for(int i = 0; i < p_file_info->buff_flush_to_db_interval * 4; i++){
            if(__atomic_load_n(&p_file_info->state, __ATOMIC_RELAXED) != LOG_SRC_READY)
                break;
            sleep_usec(250 * USEC_PER_MS);
        }
    }

    return_db_writer_db_mode_none(p_file_info, 0);
}

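/**
 * @brief Set the directory where the logs management databases are stored.
 *
 * The pointer is stored as-is (not copied), so it must remain valid for as
 * long as the database code runs, and this must be called before db_init(),
 * which fails if main_db_dir is unset.
 *
 * @param dir Directory to store MAIN_DB and the per-collection databases in.
 */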
inline void db_set_main_dir(char *const dir){
    main_db_dir = dir;
}

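/**
 * @brief Initialize the logs management database engine.
 *
 * Creates the main DB directory if needed, opens and configures MAIN_DB, runs
 * any pending schema migrations, ensures MAIN_COLLECTIONS_TABLE exists and
 * prepares the statements used to register and look up individual log
 * collections.
 *
 * Minimal calling sketch (illustrative only; the actual call site lives in the
 * plugin initialization code):
 *
 *     db_set_main_dir(db_dir);    // db_dir: caller-owned directory path
 *     if(db_init() != 0){
 *         // a non-zero return is assumed here to indicate failure
 *         collector_error("logs management database initialization failed");
 *     }
 */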
int db_init() {
|
|
int rc = 0;
|
|
char *err_msg = 0;
|
|
uv_fs_t mkdir_req;
|
|
|
|
if(unlikely(!main_db_dir || !*main_db_dir)){
|
|
rc = -1;
|
|
collector_error("main_db_dir is unset");
|
|
throw_error(NULL, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
size_t main_db_path_len = strlen(main_db_dir) + sizeof(MAIN_DB) + 1;
|
|
main_db_path = mallocz(main_db_path_len);
|
|
snprintfz(main_db_path, main_db_path_len, "%s/" MAIN_DB, main_db_dir);
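/* Note: sizeof(MAIN_DB) already includes the terminating NUL, so the extra
 * +1 above accounts for the '/' separator inserted by the format string. */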
|
|
|
|
/* Create databases directory if it doesn't exist. */
|
|
rc = uv_fs_mkdir(NULL, &mkdir_req, main_db_dir, 0775, NULL);
|
|
uv_fs_req_cleanup(&mkdir_req);
|
|
if(rc == 0) collector_info("DB directory created: %s", main_db_dir);
|
|
else if (rc == UV_EEXIST) collector_info("DB directory %s found", main_db_dir);
|
|
else {
|
|
throw_error(NULL, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
|
|
/* Create or open main db */
|
|
rc = sqlite3_open(main_db_path, &main_db);
|
|
if (unlikely(rc != SQLITE_OK)){
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
|
|
/* Configure main database */
|
|
rc = sqlite3_exec(main_db,
|
|
"PRAGMA auto_vacuum = INCREMENTAL;"
|
|
"PRAGMA synchronous = 1;"
|
|
"PRAGMA journal_mode = WAL;"
|
|
"PRAGMA temp_store = MEMORY;"
|
|
"PRAGMA foreign_keys = ON;",
|
|
0, 0, &err_msg);
|
|
if (unlikely(rc != SQLITE_OK)) {
|
|
collector_error("Failed to configure database, SQL error: %s\n", err_msg);
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
} else collector_info("%s configured successfully", MAIN_DB);
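/* The PRAGMAs above select incremental auto-vacuum, NORMAL (1) synchronous
 * mode, write-ahead logging, in-memory temporary storage and enforced
 * foreign keys - a reasonable durability/performance trade-off for a
 * frequently written metadata DB. */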
|
|
|
|
/* Execute pending main database migrations */
|
|
int main_db_ver = db_user_version(main_db, -1);
|
|
if (likely(LOGS_MANAG_DB_VERSION == main_db_ver))
|
|
collector_info("Logs management %s database version is %d (no migration needed)", MAIN_DB, main_db_ver);
|
|
else {
|
|
for(int ver = main_db_ver; ver < LOGS_MANAG_DB_VERSION && migration_list_main_db[ver].func; ver++){
|
|
rc = (migration_list_main_db[ver].func)(main_db, migration_list_main_db[ver].name);
|
|
if (unlikely(rc)){
|
|
collector_error("Logs management %s database migration from version %d to version %d failed", MAIN_DB, ver, ver + 1);
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
db_user_version(main_db, ver + 1);
|
|
}
|
|
}
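/* For reference, each migration entry is assumed to look roughly like:
 *     struct { char *name; int (*func)(sqlite3 *db, const char *name); }
 * i.e. a descriptive name plus a callback that applies one schema change,
 * as implied by how migration_list_main_db[] is used above. */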
|
|
|
|
/* Create new main DB LogCollections table if it doesn't exist */
|
|
rc = sqlite3_exec(main_db,
|
|
"CREATE TABLE IF NOT EXISTS " MAIN_COLLECTIONS_TABLE "("
|
|
"Id INTEGER PRIMARY KEY,"
|
|
"Stream_Tag TEXT NOT NULL,"
|
|
"Log_Source_Path TEXT NOT NULL,"
|
|
"Type INTEGER NOT NULL,"
|
|
"DB_Dir TEXT NOT NULL,"
|
|
"UNIQUE(Stream_Tag, DB_Dir) "
|
|
");",
|
|
0, 0, &err_msg);
|
|
if (unlikely(SQLITE_OK != rc)) {
|
|
collector_error("Failed to create table" MAIN_COLLECTIONS_TABLE "SQL error: %s", err_msg);
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
|
|
sqlite3_stmt *stmt_search_if_log_source_exists = NULL;
|
|
rc = sqlite3_prepare_v2(main_db,
|
|
"SELECT COUNT(*), Id, DB_Dir FROM " MAIN_COLLECTIONS_TABLE
|
|
" WHERE Stream_Tag = ? AND Log_Source_Path = ? AND Type = ? ;",
|
|
-1, &stmt_search_if_log_source_exists, NULL);
|
|
if (unlikely(SQLITE_OK != rc)){
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
|
|
|
|
sqlite3_stmt *stmt_insert_log_collection_metadata = NULL;
|
|
rc = sqlite3_prepare_v2(main_db,
|
|
"INSERT INTO " MAIN_COLLECTIONS_TABLE
|
|
" (Stream_Tag, Log_Source_Path, Type, DB_Dir) VALUES (?,?,?,?) ;",
|
|
-1, &stmt_insert_log_collection_metadata, NULL);
|
|
if (unlikely(SQLITE_OK != rc)){
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
|
|
for (int i = 0; i < p_file_infos_arr->count; i++) {
|
|
|
|
struct File_info *const p_file_info = p_file_infos_arr->data[i];
|
|
|
|
if(p_file_info->db_mode == LOGS_MANAG_DB_MODE_NONE){
|
|
p_file_info->db_dir = strdupz("");
|
|
p_file_info->db_writer_thread = mallocz(sizeof(uv_thread_t));
|
|
rc = uv_thread_create(p_file_info->db_writer_thread, db_writer_db_mode_none, p_file_info);
|
|
if (unlikely(rc)){
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
}
|
|
else if(p_file_info->db_mode == LOGS_MANAG_DB_MODE_FULL){
|
|
|
|
p_file_info->db_mut = mallocz(sizeof(uv_mutex_t));
|
|
rc = uv_mutex_init(p_file_info->db_mut);
|
|
if (unlikely(rc)) fatal("Failed to initialize uv_mutex_t");
|
|
uv_mutex_lock(p_file_info->db_mut);
|
|
|
|
// This error check will be used a lot, so define it here.
|
|
#define do_sqlite_error_check(p_file_info, rc, rc_expctd) do { \
|
|
if(unlikely(rc_expctd != rc)) { \
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);\
|
|
uv_mutex_unlock(p_file_info->db_mut); \
|
|
goto return_error; \
|
|
} \
|
|
} while(0)
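/* Typical usage of the macro, as seen throughout this function:
 *     rc = sqlite3_step(stmt);
 *     do_sqlite_error_check(p_file_info, rc, SQLITE_ROW);
 * i.e. compare rc against the expected SQLite result code and bail out
 * (after unlocking db_mut) on any mismatch. */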
|
|
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_search_if_log_source_exists, 1, p_file_info->stream_guid, -1, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_search_if_log_source_exists, 2, p_file_info->filename, -1, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int(stmt_search_if_log_source_exists, 3, p_file_info->log_type)) ||
|
|
/* COUNT(*) query should always return SQLITE_ROW */
|
|
SQLITE_ROW != (rc = sqlite3_step(stmt_search_if_log_source_exists)))){
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
const int log_source_occurences = sqlite3_column_int(stmt_search_if_log_source_exists, 0);
|
|
switch (log_source_occurences) {
|
|
case 0: { /* Log collection metadata not found in main DB - create a new record */
|
|
|
|
/* Create the directory where logs of this particular log source
|
|
* will be stored (named after a newly generated UUID) and bind it. */
|
|
uuid_t uuid;
|
|
uuid_generate(uuid);
|
|
char uuid_str[UUID_STR_LEN]; // ex. "1b4e28ba-2fa1-11d2-883f-0016d3cca427" + "\0"
|
|
uuid_unparse_lower(uuid, uuid_str);
|
|
|
|
p_file_info->db_dir = mallocz(snprintf(NULL, 0, "%s/%s/", main_db_dir, uuid_str) + 1);
|
|
sprintf((char *) p_file_info->db_dir, "%s/%s/", main_db_dir, uuid_str);
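/* The snprintf(NULL, 0, ...) call above measures the formatted length so
 * that exactly the right amount of memory (+1 for the NUL) is allocated
 * before the path is written with sprintf(). */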
|
|
|
|
rc = uv_fs_mkdir(NULL, &mkdir_req, p_file_info->db_dir, 0775, NULL);
|
|
uv_fs_req_cleanup(&mkdir_req);
|
|
if (unlikely(rc)) {
|
|
if(errno == EEXIST)
|
|
collector_error("DB directory %s exists but not found in %s.\n", p_file_info->db_dir, MAIN_DB);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_insert_log_collection_metadata, 1, p_file_info->stream_guid, -1, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_insert_log_collection_metadata, 2, p_file_info->filename, -1, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int(stmt_insert_log_collection_metadata, 3, p_file_info->log_type)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_insert_log_collection_metadata, 4, p_file_info->db_dir, -1, NULL)) ||
|
|
SQLITE_DONE != (rc = sqlite3_step(stmt_insert_log_collection_metadata)) ||
|
|
SQLITE_OK != (rc = sqlite3_reset(stmt_insert_log_collection_metadata)))) {
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
break;
|
|
}
|
|
|
|
case 1: { /* File metadata found in DB */
|
|
p_file_info->db_dir = mallocz((size_t)sqlite3_column_bytes(stmt_search_if_log_source_exists, 2) + 1);
|
|
sprintf((char*) p_file_info->db_dir, "%s", sqlite3_column_text(stmt_search_if_log_source_exists, 2));
|
|
break;
|
|
}
|
|
|
|
default: { /* Error, file metadata can exist either 0 or 1 times in DB */
|
|
m_assert(0, "Same file stored in DB more than once!");
|
|
collector_error("[%s]: Record encountered multiple times in DB " MAIN_COLLECTIONS_TABLE " table \n",
|
|
p_file_info->filename);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
}
|
|
rc = sqlite3_reset(stmt_search_if_log_source_exists);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* Create or open metadata DBs for each log collection */
|
|
p_file_info->db_metadata = mallocz(snprintf(NULL, 0, "%s" METADATA_DB_FILENAME, p_file_info->db_dir) + 1);
|
|
sprintf((char *) p_file_info->db_metadata, "%s" METADATA_DB_FILENAME, p_file_info->db_dir);
|
|
rc = sqlite3_open(p_file_info->db_metadata, &p_file_info->db);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* Configure metadata DB */
|
|
rc = sqlite3_exec(p_file_info->db,
|
|
"PRAGMA auto_vacuum = INCREMENTAL;"
|
|
"PRAGMA synchronous = 1;"
|
|
"PRAGMA journal_mode = WAL;"
|
|
"PRAGMA temp_store = MEMORY;"
|
|
"PRAGMA foreign_keys = ON;",
|
|
0, 0, &err_msg);
|
|
if (unlikely(rc != SQLITE_OK)) {
|
|
collector_error("[%s]: Failed to configure database, SQL error: %s", p_file_info->filename, err_msg);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
/* Execute pending metadata database migrations */
|
|
collector_info("[%s]: About to execute " METADATA_DB_FILENAME " migrations", p_file_info->chartname);
|
|
int metadata_db_ver = db_user_version(p_file_info->db, -1);
|
|
if (likely(LOGS_MANAG_DB_VERSION == metadata_db_ver)) {
|
|
collector_info( "[%s]: Logs management " METADATA_DB_FILENAME " database version is %d (no migration needed)",
|
|
p_file_info->chartname, metadata_db_ver);
|
|
} else {
|
|
for(int ver = metadata_db_ver; ver < LOGS_MANAG_DB_VERSION && migration_list_metadata_db[ver].func; ver++){
|
|
rc = (migration_list_metadata_db[ver].func)(p_file_info->db, migration_list_metadata_db[ver].name);
|
|
if (unlikely(rc)){
|
|
collector_error("[%s]: Logs management " METADATA_DB_FILENAME " database migration from version %d to version %d failed",
|
|
p_file_info->chartname, ver, ver + 1);
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
db_user_version(p_file_info->db, ver + 1);
|
|
}
|
|
}
|
|
|
|
/* -----------------------------------------------------------------
|
|
* Create BLOBS_TABLE and LOGS_TABLE if they don't exist. Do it
|
|
* as a transaction, so that it can all be rolled back if something
|
|
* goes wrong.
|
|
* -------------------------------------------------------------- */
|
|
{
|
|
rc = sqlite3_exec(p_file_info->db, "BEGIN TRANSACTION;", NULL, NULL, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* Check if BLOBS_TABLE exists or not */
|
|
sqlite3_stmt *stmt_check_if_BLOBS_TABLE_exists = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT COUNT(*) FROM sqlite_master"
|
|
" WHERE type='table' AND name='"BLOBS_TABLE"';",
|
|
-1, &stmt_check_if_BLOBS_TABLE_exists, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_step(stmt_check_if_BLOBS_TABLE_exists);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_ROW);
|
|
|
|
/* If BLOBS_TABLE doesn't exist, create and populate it */
|
|
if(sqlite3_column_int(stmt_check_if_BLOBS_TABLE_exists, 0) == 0){
|
|
|
|
/* 1. Create it */
|
|
rc = sqlite3_exec(p_file_info->db,
|
|
"CREATE TABLE IF NOT EXISTS " BLOBS_TABLE "("
|
|
"Id INTEGER PRIMARY KEY,"
|
|
"Filename TEXT NOT NULL,"
|
|
"Filesize INTEGER NOT NULL"
|
|
");",
|
|
0, 0, &err_msg);
|
|
if (unlikely(SQLITE_OK != rc)) {
|
|
collector_error("[%s]: Failed to create " BLOBS_TABLE ", SQL error: %s", p_file_info->chartname, err_msg);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
} else collector_info("[%s]: Table " BLOBS_TABLE " created successfully", p_file_info->chartname);
|
|
|
|
/* 2. Populate it */
|
|
sqlite3_stmt *stmt_init_BLOBS_table = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"INSERT INTO " BLOBS_TABLE
|
|
" (Filename, Filesize) VALUES (?,?) ;",
|
|
-1, &stmt_init_BLOBS_table, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
for(int i = 0; i < BLOB_MAX_FILES; i++){
|
|
char filename[FILENAME_MAX + 1];
|
|
snprintfz(filename, FILENAME_MAX, BLOB_STORE_FILENAME "%d", i);
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_init_BLOBS_table, 1, filename, -1, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int64(stmt_init_BLOBS_table, 2, (sqlite3_int64) 0)) ||
|
|
SQLITE_DONE != (rc = sqlite3_step(stmt_init_BLOBS_table)) ||
|
|
SQLITE_OK != (rc = sqlite3_reset(stmt_init_BLOBS_table)))){
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
}
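/* At this point BLOBS_TABLE holds one row per BLOB file, named
 * BLOB_STORE_FILENAME suffixed with 0..BLOB_MAX_FILES-1 (e.g. logs.bin.0),
 * each with an initial Filesize of 0. */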
|
|
rc = sqlite3_finalize(stmt_init_BLOBS_table);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
}
|
|
rc = sqlite3_finalize(stmt_check_if_BLOBS_TABLE_exists);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* If LOGS_TABLE doesn't exist, create it */
|
|
rc = sqlite3_exec(p_file_info->db,
|
|
"CREATE TABLE IF NOT EXISTS " LOGS_TABLE "("
|
|
"Id INTEGER PRIMARY KEY,"
|
|
"FK_BLOB_Id INTEGER NOT NULL,"
|
|
"BLOB_Offset INTEGER NOT NULL,"
|
|
"Timestamp INTEGER NOT NULL,"
|
|
"Msg_compr_size INTEGER NOT NULL,"
|
|
"Msg_decompr_size INTEGER NOT NULL,"
|
|
"Num_lines INTEGER NOT NULL,"
|
|
"FOREIGN KEY (FK_BLOB_Id) REFERENCES " BLOBS_TABLE " (Id) ON DELETE CASCADE ON UPDATE CASCADE"
|
|
");",
|
|
0, 0, &err_msg);
|
|
if (unlikely(SQLITE_OK != rc)) {
|
|
collector_error("[%s]: Failed to create " LOGS_TABLE ", SQL error: %s", p_file_info->chartname, err_msg);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
} else collector_info("[%s]: Table " LOGS_TABLE " created successfully", p_file_info->chartname);
|
|
|
|
/* Create index on LOGS_TABLE Timestamp
|
|
* TODO: If this doesn't speed up queries, check the SQLite R*Tree
|
|
* module. Requires benchmarking with/without index. */
|
|
rc = sqlite3_exec(p_file_info->db,
|
|
"CREATE INDEX IF NOT EXISTS logs_timestamps_idx "
|
|
"ON " LOGS_TABLE "(Timestamp);",
|
|
0, 0, &err_msg);
|
|
if (unlikely(SQLITE_OK != rc)) {
|
|
collector_error("[%s]: Failed to create logs_timestamps_idx, SQL error: %s", p_file_info->chartname, err_msg);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
} else collector_info("[%s]: logs_timestamps_idx created successfully", p_file_info->chartname);
|
|
|
|
rc = sqlite3_exec(p_file_info->db, "END TRANSACTION;", NULL, NULL, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
}
|
|
|
|
|
|
/* -----------------------------------------------------------------
|
|
* Remove excess BLOBs beyond BLOB_MAX_FILES (from both DB and disk
|
|
* storage).
|
|
*
|
|
* This is useful if BLOB_MAX_FILES is reduced after an agent
|
|
* restart (for example, if in the future it is not hardcoded,
|
|
* but instead it is read from the configuration file). LOGS_TABLE
|
|
* entries should be deleted automatically (due to ON DELETE CASCADE).
|
|
* -------------------------------------------------------------- */
|
|
{
|
|
sqlite3_stmt *stmt_get_BLOBS_TABLE_size = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT MAX(Id) FROM " BLOBS_TABLE ";",
|
|
-1, &stmt_get_BLOBS_TABLE_size, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_step(stmt_get_BLOBS_TABLE_size);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_ROW);
|
|
|
|
const int blobs_table_max_id = sqlite3_column_int(stmt_get_BLOBS_TABLE_size, 0);
|
|
|
|
sqlite3_stmt *stmt_retrieve_filename_last_digits = NULL; // This statement retrieves the last digit(s) from the Filename column of BLOBS_TABLE
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"WITH split(word, str) AS ( SELECT '', (SELECT Filename FROM " BLOBS_TABLE " WHERE Id = ? ) || '.' "
|
|
"UNION ALL SELECT substr(str, 0, instr(str, '.')), substr(str, instr(str, '.')+1) FROM split WHERE str!='' ) "
|
|
"SELECT word FROM split WHERE word!='' ORDER BY LENGTH(str) LIMIT 1;",
|
|
-1, &stmt_retrieve_filename_last_digits, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
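/* The recursive CTE above splits the Filename on '.' and keeps the last
 * component, e.g. for a Filename like "logs.bin.3" it returns "3", which
 * is then read back as an integer below. */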
|
|
|
|
sqlite3_stmt *stmt_delete_row_by_id = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"DELETE FROM " BLOBS_TABLE " WHERE Id = ?;",
|
|
-1, &stmt_delete_row_by_id, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
for (int id = 1; id <= blobs_table_max_id; id++){
|
|
|
|
rc = sqlite3_bind_int(stmt_retrieve_filename_last_digits, 1, id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_step(stmt_retrieve_filename_last_digits);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_ROW);
|
|
int last_digits = sqlite3_column_int(stmt_retrieve_filename_last_digits, 0);
|
|
rc = sqlite3_reset(stmt_retrieve_filename_last_digits);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* If last_digits > BLOB_MAX_FILES - 1, then some BLOB files
|
|
* will need to be removed (both from DB BLOBS_TABLE and
|
|
* also from the disk). */
|
|
if(last_digits > BLOB_MAX_FILES - 1){
|
|
|
|
/* Delete BLOB file from filesystem */
|
|
char blob_delete_path[FILENAME_MAX + 1];
|
|
snprintfz(blob_delete_path, FILENAME_MAX, "%s" BLOB_STORE_FILENAME "%d", p_file_info->db_dir, last_digits);
|
|
uv_fs_t unlink_req;
|
|
rc = uv_fs_unlink(NULL, &unlink_req, blob_delete_path, NULL);
|
|
uv_fs_req_cleanup(&unlink_req);
|
|
if (unlikely(rc)) {
|
|
// TODO: If there is an error here, the entry won't be deleted from BLOBS_TABLE. What to do?
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* Delete entry from DB BLOBS_TABLE */
|
|
rc = sqlite3_bind_int(stmt_delete_row_by_id, 1, id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_step(stmt_delete_row_by_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_DONE);
|
|
rc = sqlite3_reset(stmt_delete_row_by_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
}
|
|
}
|
|
rc = sqlite3_finalize(stmt_retrieve_filename_last_digits);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_finalize(stmt_delete_row_by_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* -------------------------------------------------------------
|
|
* BLOBS_TABLE ids after the deletion might not be contiguous.
|
|
* This needs to be fixed, by having the ids updated.
|
|
* LOGS_TABLE FKs will be updated automatically
|
|
* (due to ON UPDATE CASCADE).
|
|
* ---------------------------------------------------------- */
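/* For example, if Ids {2, 5, 7} survived the deletion above, they are
 * renumbered to {1, 2, 3} by the UPDATE loop below, and the FK_BLOB_Id
 * values in LOGS_TABLE follow automatically via ON UPDATE CASCADE. */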
|
|
|
|
int old_blobs_table_ids[BLOB_MAX_FILES];
|
|
int off = 0;
|
|
sqlite3_stmt *stmt_retrieve_all_ids = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT Id FROM " BLOBS_TABLE " ORDER BY Id ASC;",
|
|
-1, &stmt_retrieve_all_ids, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
rc = sqlite3_step(stmt_retrieve_all_ids);
|
|
while(rc == SQLITE_ROW){
|
|
old_blobs_table_ids[off++] = sqlite3_column_int(stmt_retrieve_all_ids, 0);
|
|
rc = sqlite3_step(stmt_retrieve_all_ids);
|
|
}
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_DONE);
|
|
rc = sqlite3_finalize(stmt_retrieve_all_ids);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
sqlite3_stmt *stmt_update_id = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"UPDATE " BLOBS_TABLE " SET Id = ? WHERE Id = ?;",
|
|
-1, &stmt_update_id, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
for (int i = 0; i < BLOB_MAX_FILES; i++){
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_bind_int(stmt_update_id, 1, i + 1)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int(stmt_update_id, 2, old_blobs_table_ids[i])) ||
|
|
SQLITE_DONE != (rc = sqlite3_step(stmt_update_id)) ||
|
|
SQLITE_OK != (rc = sqlite3_reset(stmt_update_id)))) {
|
|
throw_error(p_file_info->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
}
|
|
rc = sqlite3_finalize(stmt_update_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
}
|
|
|
|
/* -----------------------------------------------------------------
|
|
* Traverse BLOBS_TABLE, open logs.bin.X files and store their
|
|
* file handles in p_file_info array.
|
|
* -------------------------------------------------------------- */
|
|
sqlite3_stmt *stmt_retrieve_metadata_from_id = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT Filename, Filesize FROM " BLOBS_TABLE
|
|
" WHERE Id = ? ;",
|
|
-1, &stmt_retrieve_metadata_from_id, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
sqlite3_stmt *stmt_retrieve_total_logs_size = NULL;
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT SUM(Msg_compr_size) FROM " LOGS_TABLE
|
|
" WHERE FK_BLOB_Id = ? GROUP BY FK_BLOB_Id ;",
|
|
-1, &stmt_retrieve_total_logs_size, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
uv_fs_t open_req;
|
|
for(int id = 1; id <= BLOB_MAX_FILES; id++){
|
|
|
|
/* Open BLOB file based on filename stored in BLOBS_TABLE. */
|
|
rc = sqlite3_bind_int(stmt_retrieve_metadata_from_id, 1, id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_step(stmt_retrieve_metadata_from_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_ROW);
|
|
|
|
char filename[FILENAME_MAX + 1] = {0};
|
|
snprintfz(filename, FILENAME_MAX, "%s%s", p_file_info->db_dir,
|
|
sqlite3_column_text(stmt_retrieve_metadata_from_id, 0));
|
|
rc = uv_fs_open(NULL, &open_req, filename,
|
|
UV_FS_O_RDWR | UV_FS_O_CREAT | UV_FS_O_APPEND | UV_FS_O_RANDOM,
|
|
0644, NULL);
|
|
if (unlikely(rc < 0)){
|
|
uv_fs_req_cleanup(&open_req);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
// For uv_fs_open(), open_req.result holds the opened file descriptor (or a negative error code).
|
|
p_file_info->blob_handles[id] = open_req.result;
|
|
uv_fs_req_cleanup(&open_req);
|
|
|
|
const int64_t metadata_filesize = (int64_t) sqlite3_column_int64(stmt_retrieve_metadata_from_id, 1);
|
|
|
|
/* -------------------------------------------------------------
|
|
* Retrieve total log messages compressed size from LOGS_TABLE
|
|
* for current FK_BLOB_Id.
|
|
* Only used to assert correctness - not used elsewhere.
|
|
*
|
|
* If no rows are returned, this is most likely the initial
|
|
* execution of the program, so the state is still valid (unless
|
|
* rc is something other than SQLITE_DONE, which is then an error).
|
|
* ---------------------------------------------------------- */
|
|
rc = sqlite3_bind_int(stmt_retrieve_total_logs_size, 1, id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_step(stmt_retrieve_total_logs_size);
|
|
if (SQLITE_ROW == rc){
|
|
const int64_t total_logs_filesize = (int64_t) sqlite3_column_int64(stmt_retrieve_total_logs_size, 0);
|
|
if(unlikely(total_logs_filesize != metadata_filesize)){
|
|
throw_error(p_file_info->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
} else do_sqlite_error_check(p_file_info, rc, SQLITE_DONE);
|
|
|
|
|
|
/* Get filesize of BLOB file. */
|
|
uv_fs_t stat_req;
|
|
rc = uv_fs_stat(NULL, &stat_req, filename, NULL);
|
|
if (unlikely(rc)){
|
|
uv_fs_req_cleanup(&stat_req);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
const int64_t blob_filesize = (int64_t) stat_req.statbuf.st_size;
|
|
uv_fs_req_cleanup(&stat_req);
|
|
|
|
do{
|
|
/* Case 1: blob_filesize == metadata_filesize (equal, either both zero or not): All good */
|
|
if(likely(blob_filesize == metadata_filesize))
|
|
break;
|
|
|
|
/* Case 2: blob_filesize == 0 && metadata_filesize > 0: fatal(). Could it mean that
|
|
* the BLOB file was rotated but the SQLite metadata wasn't updated? Could it then
|
|
* be recovered by un-rotating? Either way, treat it as a fatal error for now. */
|
|
// TODO: Can we avoid fatal()?
|
|
if(unlikely(blob_filesize == 0 && metadata_filesize > 0)){
|
|
collector_error("[%s]: blob_filesize == 0 but metadata_filesize > 0 for '%s'\n",
|
|
p_file_info->chartname, filename);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
/* Case 3: blob_filesize > metadata_filesize: Truncate binary to sqlite filesize, program
|
|
* crashed or terminated after writing BLOBs to external file but before metadata was updated */
|
|
if(unlikely(blob_filesize > metadata_filesize)){
|
|
collector_info("[%s]: blob_filesize > metadata_filesize for '%s'. Will attempt to fix it.",
|
|
p_file_info->chartname, filename);
|
|
uv_fs_t trunc_req;
|
|
rc = uv_fs_ftruncate(NULL, &trunc_req, p_file_info->blob_handles[id], metadata_filesize, NULL);
|
|
uv_fs_req_cleanup(&trunc_req);
|
|
if(unlikely(rc)) {
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
break;
|
|
}
|
|
|
|
/* Case 4: blob_filesize < metadata_filesize: unrecoverable,
|
|
* maybe rotation went horribly wrong?
|
|
* TODO: Delete external BLOB and clear metadata from DB,
|
|
* start from a clean state, keeping only the most recent logs. */
|
|
if(unlikely(blob_filesize < metadata_filesize)){
|
|
collector_info("[%s]: blob_filesize < metadata_filesize for '%s'.",
|
|
p_file_info->chartname, filename);
|
|
throw_error(p_file_info->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
}
|
|
|
|
/* Case 5: default if none of the above, should never reach here, fatal() */
|
|
m_assert(0, "Code should not reach here");
|
|
throw_error(p_file_info->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
goto return_error;
|
|
} while(0);
|
|
|
|
|
|
/* Initialise blob_write_handle with logs.bin.0 */
|
|
if(filename[strlen(filename) - 1] == '0')
|
|
p_file_info->blob_write_handle_offset = id;
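/* Note: this check assumes only one BLOB filename ends in '0'
 * (i.e. BLOB_MAX_FILES <= 10), so the write handle is initialised
 * from the file carrying the ".0" suffix. */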
|
|
|
|
rc = sqlite3_reset(stmt_retrieve_total_logs_size);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
rc = sqlite3_reset(stmt_retrieve_metadata_from_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
}
|
|
|
|
rc = sqlite3_finalize(stmt_retrieve_metadata_from_id);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* Prepare statements to be used in single database queries */
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT Timestamp, Msg_compr_size , Msg_decompr_size, "
|
|
"BLOB_Offset, " BLOBS_TABLE".Id, Num_lines "
|
|
"FROM " LOGS_TABLE " INNER JOIN " BLOBS_TABLE " "
|
|
"ON " LOGS_TABLE ".FK_BLOB_Id = " BLOBS_TABLE ".Id "
|
|
"WHERE Timestamp >= ? AND Timestamp <= ? "
|
|
"ORDER BY Timestamp;",
|
|
-1, &p_file_info->stmt_get_log_msg_metadata_asc, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
rc = sqlite3_prepare_v2(p_file_info->db,
|
|
"SELECT Timestamp, Msg_compr_size , Msg_decompr_size, "
|
|
"BLOB_Offset, " BLOBS_TABLE".Id, Num_lines "
|
|
"FROM " LOGS_TABLE " INNER JOIN " BLOBS_TABLE " "
|
|
"ON " LOGS_TABLE ".FK_BLOB_Id = " BLOBS_TABLE ".Id "
|
|
"WHERE Timestamp <= ? AND Timestamp >= ? "
|
|
"ORDER BY Timestamp DESC;",
|
|
-1, &p_file_info->stmt_get_log_msg_metadata_desc, NULL);
|
|
do_sqlite_error_check(p_file_info, rc, SQLITE_OK);
|
|
|
|
/* DB initialisation finished; release lock */
|
|
uv_mutex_unlock(p_file_info->db_mut);
|
|
|
|
/* Create synchronous writer thread, one for each log source */
|
|
p_file_info->db_writer_thread = mallocz(sizeof(uv_thread_t));
|
|
rc = uv_thread_create(p_file_info->db_writer_thread, db_writer_db_mode_full, p_file_info);
|
|
if (unlikely(rc)){
|
|
throw_error(p_file_info->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
goto return_error;
|
|
}
|
|
}
|
|
}
|
|
rc = sqlite3_finalize(stmt_search_if_log_source_exists);
|
|
if (unlikely(rc != SQLITE_OK)){
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
// TODO: Some additional cleanup required here, e.g. terminate db_writer_thread.
|
|
goto return_error;
|
|
}
|
|
rc = sqlite3_finalize(stmt_insert_log_collection_metadata);
|
|
if (unlikely(rc != SQLITE_OK)){
|
|
throw_error(MAIN_DB, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
// TODO: Some additional cleanup required here, e.g. terminate db_writer_thread.
|
|
goto return_error;
|
|
}
|
|
|
|
return 0;
|
|
|
|
return_error:
|
|
freez(main_db_path);
|
|
main_db_path = NULL;
|
|
|
|
sqlite3_close(main_db); // No-op if main_db == NULL
|
|
sqlite3_free(err_msg); // No-op if err_msg == NULL
|
|
|
|
m_assert(rc != 0, "rc should not be == 0 in case of error");
|
|
return rc == 0 ? -1 : rc;
|
|
}
|
|
|
|
/**
|
|
* @brief Search database(s) for logs
|
|
* @details This function searches one or more databases for any results
|
|
* matching the query parameters. If any results are found, it will decompress
|
|
* the text of each returned row and add it to the results buffer, up to a
|
|
* maximum amount of p_query_params->quota bytes (unless timed out).
|
|
* @todo Make decompress buffer static to reduce mallocs/frees.
|
|
* @todo Limit number of results returned through SQLite Query to speed up search?
|
|
*/
|
|
void db_search(logs_query_params_t *const p_query_params, struct File_info *const p_file_infos[]) {
|
|
int rc = 0;
|
|
|
|
sqlite3_stmt *stmt_get_log_msg_metadata;
|
|
sqlite3 *dbt = NULL; // Used only when multiple DBs are searched
|
|
|
|
if(!p_file_infos[1]){ /* Single DB to be searched */
|
|
stmt_get_log_msg_metadata = p_query_params->order_by_asc ?
|
|
p_file_infos[0]->stmt_get_log_msg_metadata_asc : p_file_infos[0]->stmt_get_log_msg_metadata_desc;
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_bind_int64(stmt_get_log_msg_metadata, 1, p_query_params->req_from_ts)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int64(stmt_get_log_msg_metadata, 2, p_query_params->req_to_ts)) ||
|
|
(SQLITE_ROW != (rc = sqlite3_step(stmt_get_log_msg_metadata)) && (SQLITE_DONE != rc))
|
|
)){
|
|
throw_error(p_file_infos[0]->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
// TODO: If there are errors here, should db_writer_db_mode_full() be terminated?
|
|
sqlite3_reset(stmt_get_log_msg_metadata);
|
|
return;
|
|
}
|
|
} else { /* Multiple DBs to be searched */
|
|
sqlite3_stmt *stmt_attach_db;
|
|
sqlite3_stmt *stmt_create_tmp_view;
|
|
int pfi_off = 0;
|
|
|
|
/* Open a new DB connection on the first log source DB and attach other DBs */
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_open_v2(p_file_infos[0]->db_metadata, &dbt, SQLITE_OPEN_READONLY, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_prepare_v2(dbt,"ATTACH DATABASE ? AS ? ;", -1, &stmt_attach_db, NULL))
|
|
)){
|
|
throw_error(p_file_infos[0]->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
sqlite3_close_v2(dbt);
|
|
return;
|
|
}
|
|
for(pfi_off = 0; p_file_infos[pfi_off]; pfi_off++){
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_bind_text(stmt_attach_db, 1, p_file_infos[pfi_off]->db_metadata, -1, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int(stmt_attach_db, 2, pfi_off)) ||
|
|
SQLITE_DONE != (rc = sqlite3_step(stmt_attach_db)) ||
|
|
SQLITE_OK != (rc = sqlite3_reset(stmt_attach_db))
|
|
)){
|
|
throw_error(p_file_infos[pfi_off]->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
sqlite3_close_v2(dbt);
|
|
return;
|
|
}
|
|
}
|
|
|
|
/* Create temporary view, then prepare retrieval of metadata from
|
|
* TMP_VIEW_TABLE statement and execute search.
|
|
* TODO: Limit number of results returned through SQLite Query to speed up search? */
|
|
#define TMP_VIEW_TABLE "compound_view"
|
|
#define TMP_VIEW_QUERY_PREFIX "CREATE TEMP VIEW " TMP_VIEW_TABLE " AS SELECT * FROM (SELECT * FROM '0'."\
|
|
LOGS_TABLE " INNER JOIN (VALUES(0)) ORDER BY Timestamp) "
|
|
#define TMP_VIEW_QUERY_BODY_1 "UNION ALL SELECT * FROM (SELECT * FROM '"
|
|
#define TMP_VIEW_QUERY_BODY_2 "'." LOGS_TABLE " INNER JOIN (VALUES("
|
|
#define TMP_VIEW_QUERY_BODY_3 ")) ORDER BY Timestamp) "
|
|
#define TMP_VIEW_QUERY_POSTFIX "ORDER BY Timestamp;"
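/* For two sources the generated statement looks roughly like:
 *   CREATE TEMP VIEW compound_view AS
 *     SELECT * FROM (SELECT * FROM '0'.<LOGS_TABLE> INNER JOIN (VALUES(0)) ORDER BY Timestamp)
 *     UNION ALL
 *     SELECT * FROM (SELECT * FROM '1'.<LOGS_TABLE> INNER JOIN (VALUES(1)) ORDER BY Timestamp)
 *     ORDER BY Timestamp;
 * where the VALUES(n) column (column1) tags each row with the index of the
 * p_file_infos[] entry it came from. */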
|
|
|
|
char tmp_view_query[sizeof(TMP_VIEW_QUERY_PREFIX) + (
|
|
sizeof(TMP_VIEW_QUERY_BODY_1) +
|
|
sizeof(TMP_VIEW_QUERY_BODY_2) +
|
|
sizeof(TMP_VIEW_QUERY_BODY_3) + 4
|
|
) * (LOGS_MANAG_MAX_COMPOUND_QUERY_SOURCES - 1) +
|
|
sizeof(TMP_VIEW_QUERY_POSTFIX) +
|
|
50 /* +50 bytes to play it safe */] = TMP_VIEW_QUERY_PREFIX;
|
|
int pos = sizeof(TMP_VIEW_QUERY_PREFIX) - 1;
|
|
for(pfi_off = 1; p_file_infos[pfi_off]; pfi_off++){ // Skip p_file_infos[0]
|
|
int n = snprintf(&tmp_view_query[pos], sizeof(tmp_view_query) - pos, "%s%d%s%d%s",
|
|
TMP_VIEW_QUERY_BODY_1, pfi_off,
|
|
TMP_VIEW_QUERY_BODY_2, pfi_off,
|
|
TMP_VIEW_QUERY_BODY_3);
|
|
|
|
if (n < 0 || n >= (int) sizeof(tmp_view_query) - pos){
|
|
throw_error(p_file_infos[pfi_off]->chartname, ERR_TYPE_OTHER, n, __LINE__, __FILE__, __FUNCTION__);
|
|
sqlite3_close_v2(dbt);
|
|
return;
|
|
}
|
|
pos += n;
|
|
}
|
|
snprintf(&tmp_view_query[pos], sizeof(tmp_view_query) - pos, "%s", TMP_VIEW_QUERY_POSTFIX);
|
|
|
|
if(unlikely(
|
|
SQLITE_OK != (rc = sqlite3_prepare_v2(dbt, tmp_view_query, -1, &stmt_create_tmp_view, NULL)) ||
|
|
SQLITE_DONE != (rc = sqlite3_step(stmt_create_tmp_view)) ||
|
|
SQLITE_OK != (rc = sqlite3_prepare_v2(dbt, p_query_params->order_by_asc ?
|
|
|
|
"SELECT Timestamp, Msg_compr_size , Msg_decompr_size, "
|
|
"BLOB_Offset, FK_BLOB_Id, Num_lines, column1 "
|
|
"FROM " TMP_VIEW_TABLE " "
|
|
"WHERE Timestamp >= ? AND Timestamp <= ?;" :
|
|
|
|
/* TODO: The following can also be done by defining
|
|
* a descending order tmp_view_query, which will
|
|
* probably be faster. Needs to be measured. */
|
|
|
|
"SELECT Timestamp, Msg_compr_size , Msg_decompr_size, "
|
|
"BLOB_Offset, FK_BLOB_Id, Num_lines, column1 "
|
|
"FROM " TMP_VIEW_TABLE " "
|
|
"WHERE Timestamp <= ? AND Timestamp >= ? ORDER BY Timestamp DESC;",
|
|
|
|
-1, &stmt_get_log_msg_metadata, NULL)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int64(stmt_get_log_msg_metadata, 1,
|
|
(sqlite3_int64)p_query_params->req_from_ts)) ||
|
|
SQLITE_OK != (rc = sqlite3_bind_int64(stmt_get_log_msg_metadata, 2,
|
|
(sqlite3_int64)p_query_params->req_to_ts)) ||
|
|
(SQLITE_ROW != (rc = sqlite3_step(stmt_get_log_msg_metadata)) && (SQLITE_DONE != rc))
|
|
)){
|
|
throw_error(p_file_infos[0]->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
sqlite3_close_v2(dbt);
|
|
return;
|
|
}
|
|
}
|
|
|
|
Circ_buff_item_t tmp_itm = {0};
|
|
|
|
BUFFER *const res_buff = p_query_params->results_buff;
|
|
logs_query_res_hdr_t res_hdr = { // results header
|
|
.timestamp = p_query_params->act_to_ts,
|
|
.text_size = 0,
|
|
.matches = 0,
|
|
.log_source = "",
|
|
.log_type = "",
|
|
.basename = "",
|
|
.filename = "",
|
|
.chartname =""
|
|
};
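/* Each result appended to res_buff below is laid out as a fixed-size
 * logs_query_res_hdr_t header followed immediately by res_hdr.text_size
 * bytes of (decompressed or keyword-filtered) log text, so the caller can
 * walk the buffer record by record. */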
|
|
size_t text_compressed_size_max = 0;
|
|
|
|
while (rc == SQLITE_ROW) {
|
|
|
|
/* Retrieve metadata from DB */
|
|
tmp_itm.timestamp = (msec_t)sqlite3_column_int64(stmt_get_log_msg_metadata, 0);
|
|
tmp_itm.text_compressed_size = (size_t)sqlite3_column_int64(stmt_get_log_msg_metadata, 1);
|
|
tmp_itm.text_size = (size_t)sqlite3_column_int64(stmt_get_log_msg_metadata, 2);
|
|
int64_t blob_offset = (int64_t) sqlite3_column_int64(stmt_get_log_msg_metadata, 3);
|
|
int blob_handles_offset = sqlite3_column_int(stmt_get_log_msg_metadata, 4);
|
|
unsigned long num_lines = (unsigned long) sqlite3_column_int64(stmt_get_log_msg_metadata, 5);
|
|
int db_off = p_file_infos[1] ? sqlite3_column_int(stmt_get_log_msg_metadata, 6) : 0;
|
|
|
|
/* If exceeding quota or timeout is reached and new timestamp
|
|
* is different than previous, terminate query. */
|
|
if((res_buff->len >= p_query_params->quota || now_monotonic_usec() > p_query_params->stop_monotonic_ut) &&
|
|
tmp_itm.timestamp != res_hdr.timestamp){
|
|
p_query_params->act_to_ts = res_hdr.timestamp;
|
|
break;
|
|
}
|
|
|
|
res_hdr.timestamp = tmp_itm.timestamp;
|
|
snprintfz(res_hdr.log_source, sizeof(res_hdr.log_source), "%s", log_src_t_str[p_file_infos[db_off]->log_source]);
|
|
snprintfz(res_hdr.log_type, sizeof(res_hdr.log_type), "%s", log_src_type_t_str[p_file_infos[db_off]->log_type]);
|
|
snprintfz(res_hdr.basename, sizeof(res_hdr.basename), "%s", p_file_infos[db_off]->file_basename);
|
|
snprintfz(res_hdr.filename, sizeof(res_hdr.filename), "%s", p_file_infos[db_off]->filename);
|
|
snprintfz(res_hdr.chartname, sizeof(res_hdr.chartname), "%s", p_file_infos[db_off]->chartname);
|
|
|
|
/* Retrieve compressed log messages from BLOB file */
|
|
if(tmp_itm.text_compressed_size > text_compressed_size_max){
|
|
text_compressed_size_max = tmp_itm.text_compressed_size;
|
|
tmp_itm.text_compressed = reallocz(tmp_itm.text_compressed, text_compressed_size_max);
|
|
}
|
|
uv_fs_t read_req;
|
|
uv_buf_t uv_buf = uv_buf_init(tmp_itm.text_compressed, tmp_itm.text_compressed_size);
|
|
rc = uv_fs_read(NULL,
|
|
&read_req,
|
|
p_file_infos[db_off]->blob_handles[blob_handles_offset],
|
|
&uv_buf, 1, blob_offset, NULL);
|
|
uv_fs_req_cleanup(&read_req);
|
|
if (unlikely(rc < 0)){
|
|
throw_error(NULL, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
break;
|
|
}
|
|
|
|
/* Append retrieved results to BUFFER.
|
|
* In the case of search_keyword(), less than sizeof(res_hdr) + tmp_itm.text_size
|
|
* space may be required, but go for the worst-case scenario for now. */
|
|
buffer_increase(res_buff, sizeof(res_hdr) + tmp_itm.text_size);
|
|
|
|
if(!p_query_params->keyword || !*p_query_params->keyword || !strcmp(p_query_params->keyword, " ")){
|
|
rc = LZ4_decompress_safe(tmp_itm.text_compressed,
|
|
&res_buff->buffer[res_buff->len + sizeof(res_hdr)],
|
|
tmp_itm.text_compressed_size,
|
|
tmp_itm.text_size);
|
|
|
|
if(unlikely(rc < 0)){
|
|
throw_error(p_file_infos[db_off]->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
break;
|
|
}
|
|
|
|
res_hdr.matches = num_lines;
|
|
res_hdr.text_size = tmp_itm.text_size;
|
|
}
|
|
else {
|
|
tmp_itm.data = mallocz(tmp_itm.text_size);
|
|
rc = LZ4_decompress_safe(tmp_itm.text_compressed,
|
|
tmp_itm.data,
|
|
tmp_itm.text_compressed_size,
|
|
tmp_itm.text_size);
|
|
|
|
if(unlikely(rc < 0)){
|
|
freez(tmp_itm.data);
|
|
throw_error(p_file_infos[db_off]->chartname, ERR_TYPE_OTHER, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
break;
|
|
}
|
|
|
|
res_hdr.matches = search_keyword( tmp_itm.data, tmp_itm.text_size,
|
|
&res_buff->buffer[res_buff->len + sizeof(res_hdr)],
|
|
&res_hdr.text_size, p_query_params->keyword, NULL,
|
|
p_query_params->ignore_case);
|
|
freez(tmp_itm.data);
|
|
|
|
m_assert( (res_hdr.matches > 0 && res_hdr.text_size > 0) ||
|
|
(res_hdr.matches == 0 && res_hdr.text_size == 0),
|
|
"res_hdr.matches and res_hdr.text_size must both be > 0 or == 0.");
|
|
|
|
if(unlikely(res_hdr.matches < 0)){ /* res_hdr.matches < 0 - error during keyword search */
|
|
throw_error(p_file_infos[db_off]->chartname, ERR_TYPE_LIBUV, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
break;
|
|
}
|
|
}
|
|
|
|
if(res_hdr.text_size){
|
|
res_buff->buffer[res_buff->len + sizeof(res_hdr) + res_hdr.text_size - 1] = '\n'; // replace '\0' with '\n'
|
|
memcpy(&res_buff->buffer[res_buff->len], &res_hdr, sizeof(res_hdr));
|
|
res_buff->len += sizeof(res_hdr) + res_hdr.text_size;
|
|
p_query_params->num_lines += res_hdr.matches;
|
|
}
|
|
|
|
m_assert(TEST_MS_TIMESTAMP_VALID(res_hdr.timestamp), "res_hdr.timestamp is invalid");
|
|
|
|
rc = sqlite3_step(stmt_get_log_msg_metadata);
|
|
if (unlikely(rc != SQLITE_ROW && rc != SQLITE_DONE)){
|
|
throw_error(p_file_infos[db_off]->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
// TODO: If there are errors here, should db_writer_db_mode_full() be terminated?
|
|
break;
|
|
}
|
|
}
|
|
|
|
if(tmp_itm.text_compressed)
|
|
freez(tmp_itm.text_compressed);
|
|
|
|
if(p_file_infos[1])
|
|
rc = sqlite3_close_v2(dbt);
|
|
else
|
|
rc = sqlite3_reset(stmt_get_log_msg_metadata);
|
|
|
|
if (unlikely(SQLITE_OK != rc))
|
|
throw_error(p_file_infos[0]->chartname, ERR_TYPE_SQLITE, rc, __LINE__, __FILE__, __FUNCTION__);
|
|
}
|