From 4a92e9e541283e7f6cb7bf0257816726db22f1a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Tue, 5 Apr 2022 13:54:26 +0200 Subject: [PATCH 01/35] Add support for Linux in HTTP client (close #5) PR #49 --- Makefile | 5 +- examples/main.cpp | 2 +- performance/logs.txt | 1 + performance/mock_emitter.hpp | 2 +- performance/mute_emitter.hpp | 2 +- snowplow-cpp-tracker-example.vcxproj | 11 +- snowplow-cpp-tracker-example.vcxproj.filters | 13 +- snowplow-cpp-tracker.vcxproj | 17 +- snowplow-cpp-tracker.vcxproj.filters | 29 +- src/emitter.cpp | 34 ++- src/emitter.hpp | 32 ++- src/http/http_client.hpp | 46 +++ src/http/http_client_apple.cpp | 96 +++++++ src/http/http_client_apple.hpp | 46 +++ src/http/http_client_curl.cpp | 78 ++++++ src/http/http_client_curl.hpp | 47 ++++ src/http/http_client_windows.cpp | 134 +++++++++ src/http/http_client_windows.hpp | 46 +++ src/{ => http}/http_request_result.cpp | 0 src/{ => http}/http_request_result.hpp | 0 src/http_client.cpp | 263 ------------------ test/emitter_test.cpp | 42 ++- test/{ => http}/http_client_test.cpp | 28 +- test/{ => http}/http_request_result_test.cpp | 4 +- test/http/test_http_client.cpp | 55 ++++ .../http/test_http_client.hpp | 49 +--- test/tracker_test.cpp | 3 +- 27 files changed, 699 insertions(+), 386 deletions(-) create mode 100644 src/http/http_client.hpp create mode 100644 src/http/http_client_apple.cpp create mode 100644 src/http/http_client_apple.hpp create mode 100644 src/http/http_client_curl.cpp create mode 100644 src/http/http_client_curl.hpp create mode 100644 src/http/http_client_windows.cpp create mode 100644 src/http/http_client_windows.hpp rename src/{ => http}/http_request_result.cpp (100%) rename src/{ => http}/http_request_result.hpp (100%) delete mode 100644 src/http_client.cpp rename test/{ => http}/http_client_test.cpp (73%) rename test/{ => http}/http_request_result_test.cpp (96%) create mode 100644 test/http/test_http_client.cpp rename src/http_client.hpp => 
test/http/test_http_client.hpp (52%) diff --git a/Makefile b/Makefile index 22b09d0..c91509a 100644 --- a/Makefile +++ b/Makefile @@ -18,9 +18,9 @@ cc-objects := $(patsubst %.c, %.o, $(cc-include-files)) # C++ Files -cxx-src-files := $(shell find src -maxdepth 1 -name "*.cpp") +cxx-src-files := $(shell find src -maxdepth 2 -name "*.cpp") cxx-include-files := $(shell find include -maxdepth 1 -name "*.cpp") -cxx-test-files := $(shell find test -maxdepth 1 -name "*.cpp") +cxx-test-files := $(shell find test -maxdepth 2 -name "*.cpp") cxx-example-files := $(shell find examples -maxdepth 1 -name "*.cpp") cxx-performance-files := $(shell find performance -maxdepth 1 -name "*.cpp") @@ -42,6 +42,7 @@ OBJCXX := c++ CCFLAGS := -Werror -g CXXFLAGS := -std=c++11 -Werror -g -D SNOWPLOW_TEST_SUITE --coverage -O0 LDFLAGS := -framework CoreFoundation -framework CFNetwork -framework Foundation -framework CoreServices +LDLIBS := -lcurl # Building diff --git a/examples/main.cpp b/examples/main.cpp index 5f35e96..8751ed3 100644 --- a/examples/main.cpp +++ b/examples/main.cpp @@ -26,7 +26,7 @@ int main(int argc, char **argv) { string uri = argv[1]; string db_name = "demo.db"; - Emitter emitter(uri, Emitter::Method::POST, Emitter::Protocol::HTTP, 52000, 52000, 500, db_name); + Emitter emitter(uri, Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name); Subject subject; subject.set_user_id("a-user-id"); diff --git a/performance/logs.txt b/performance/logs.txt index e5486f5..aff86cd 100644 --- a/performance/logs.txt +++ b/performance/logs.txt @@ -4,3 +4,4 @@ {"desktop_context":{"data":{"deviceManufacturer":"Apple 
Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":5.217072375,"mocked_emitter_and_real_session":5.673155916,"mute_emitter_and_mocked_session":36.860906125,"mute_emitter_and_real_session":33.030649,"num_operations":10000,"num_threads":5},"timestamp":1645005862283,"tracker_version":"cpp-0.1.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":5.169725125,"mocked_emitter_and_real_session":5.426304083,"mute_emitter_and_mocked_session":27.828702709,"mute_emitter_and_real_session":32.127459,"num_operations":10000,"num_threads":5},"timestamp":1645006143834,"tracker_version":"cpp-0.1.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":5.742478167,"mocked_emitter_and_real_session":5.050215375,"mute_emitter_and_mocked_session":25.8688495,"mute_emitter_and_real_session":19.645383417,"num_operations":10000,"num_threads":5},"timestamp":1645041474207,"tracker_version":"cpp-0.1.0"} +{"desktop_context":{"data":{"deviceManufacturer":"Apple 
Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.493122875,"mocked_emitter_and_real_session":6.679922959,"mute_emitter_and_mocked_session":18.915125958,"mute_emitter_and_real_session":17.245701792,"num_operations":10000,"num_threads":5},"timestamp":1648794323772,"tracker_version":"cpp-0.2.0"} diff --git a/performance/mock_emitter.hpp b/performance/mock_emitter.hpp index 99c5b68..cf09db6 100644 --- a/performance/mock_emitter.hpp +++ b/performance/mock_emitter.hpp @@ -22,7 +22,7 @@ using std::string; class MockEmitter : public Emitter { public: - MockEmitter(const string &db_name) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 52000, 52000, 500, db_name) {} + MockEmitter(const string &db_name) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name) {} void start() {} void stop() {} diff --git a/performance/mute_emitter.hpp b/performance/mute_emitter.hpp index 4dbdf75..13a9953 100644 --- a/performance/mute_emitter.hpp +++ b/performance/mute_emitter.hpp @@ -21,7 +21,7 @@ using std::string; class MuteEmitter : public Emitter { public: - MuteEmitter(const string &db_name) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 52000, 52000, 500, db_name) {} + MuteEmitter(const string &db_name) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name) {} void start() {} void stop() {} diff --git a/snowplow-cpp-tracker-example.vcxproj b/snowplow-cpp-tracker-example.vcxproj index 8c6c1cc..0110a9e 100644 --- a/snowplow-cpp-tracker-example.vcxproj +++ b/snowplow-cpp-tracker-example.vcxproj @@ -103,8 +103,8 @@ - - + + @@ -120,8 +120,9 @@ - - + + + @@ -132,4 +133,4 @@ - \ No newline at end of file + diff --git 
a/snowplow-cpp-tracker-example.vcxproj.filters b/snowplow-cpp-tracker-example.vcxproj.filters index 0d58579..7c7f955 100644 --- a/snowplow-cpp-tracker-example.vcxproj.filters +++ b/snowplow-cpp-tracker-example.vcxproj.filters @@ -24,10 +24,10 @@ Source Files - + Source Files - + Source Files @@ -71,10 +71,13 @@ Header Files - + Header Files - + + Header Files + + Header Files @@ -105,4 +108,4 @@ Header Files - \ No newline at end of file + diff --git a/snowplow-cpp-tracker.vcxproj b/snowplow-cpp-tracker.vcxproj index 2cc5c50..c4588ef 100644 --- a/snowplow-cpp-tracker.vcxproj +++ b/snowplow-cpp-tracker.vcxproj @@ -107,19 +107,20 @@ - - + + + - - + + @@ -136,17 +137,19 @@ - - + + + + - \ No newline at end of file + diff --git a/snowplow-cpp-tracker.vcxproj.filters b/snowplow-cpp-tracker.vcxproj.filters index 65872ea..74939d1 100644 --- a/snowplow-cpp-tracker.vcxproj.filters +++ b/snowplow-cpp-tracker.vcxproj.filters @@ -24,10 +24,10 @@ Source Files - + Source Files - + Source Files @@ -54,6 +54,9 @@ Source Files + + Source Files + Source Files @@ -63,10 +66,10 @@ Source Files - + Source Files - + Source Files @@ -104,10 +107,13 @@ Header Files - + + Header Files + + Header Files - + Header Files @@ -137,8 +143,11 @@ Header Files - - Header Files - + + header files + + + header files + - \ No newline at end of file + diff --git a/src/emitter.cpp b/src/emitter.cpp index ef67d8e..2600093 100644 --- a/src/emitter.cpp +++ b/src/emitter.cpp @@ -18,12 +18,34 @@ using std::invalid_argument; using std::lock_guard; using std::stringstream; using std::unique_lock; +using std::unique_ptr; const int post_wrapper_bytes = 88; // "schema":"iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-4","data":[] const int post_stm_bytes = 22; // "stm":"1443452851000" +#if defined(__APPLE__) +#include "http/http_client_apple.hpp" +unique_ptr createDefaultHttpClient() { + return unique_ptr(new HttpClientApple()); +} +#elif defined(WIN32) || defined(_WIN32) || defined(__WIN32) && 
!defined(__CYGWIN__) +#include "http/http_client_windows.hpp" +unique_ptr createDefaultHttpClient() { + return unique_ptr(new HttpClientWindows()); +} +#else +#include "http/http_client_curl.hpp" +unique_ptr createDefaultHttpClient() { + return unique_ptr(new HttpClientCurl()); +} +#endif + +Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_limit, + int byte_limit_post, int byte_limit_get, const string &db_name) : Emitter(uri, method, protocol, send_limit, byte_limit_post, byte_limit_get, db_name, createDefaultHttpClient()) { +} + Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_limit, - int byte_limit_post, int byte_limit_get, const string &db_name) : m_url(this->get_collector_url(uri, protocol, method)) { + int byte_limit_post, int byte_limit_get, const string &db_name, unique_ptr http_client) : m_url(this->get_collector_url(uri, protocol, method)) { Storage::init(db_name); @@ -50,6 +72,7 @@ Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_l this->m_send_limit = send_limit; this->m_byte_limit_post = byte_limit_post; this->m_byte_limit_get = byte_limit_get; + this->m_http_client = std::move(http_client); } Emitter::~Emitter() { @@ -154,7 +177,8 @@ void Emitter::do_send(list *event_rows, list row_id = {it->id}; - request_futures.push_back(std::async(HttpClient::http_get, this->m_url, query_string, row_id, (query_string.size() > this->m_byte_limit_get))); + request_futures.push_back(std::async(&HttpClient::http_get, this->m_http_client.get(), this->m_url, query_string, row_id, (query_string.size() > this->m_byte_limit_get))); + request_futures.push_back(std::async(&HttpClient::http_get, this->m_http_client.get(), this->m_url, query_string, row_id, (query_string.size() > this->m_byte_limit_get))); } } else { list row_ids; @@ -168,13 +192,13 @@ void Emitter::do_send(list *event_rows, list single_row_id = {it->id}; list single_payload = {it->event}; - 
request_futures.push_back(std::async(HttpClient::http_post, this->m_url, this->build_post_data_json(single_payload), single_row_id, true)); + request_futures.push_back(std::async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(single_payload), single_row_id, true)); single_row_id.clear(); single_payload.clear(); } else if ((total_byte_size + byte_size + post_wrapper_bytes + (payloads.size() - 1)) > this->m_byte_limit_post) { // Byte limit reached - request_futures.push_back(std::async(HttpClient::http_post, this->m_url, this->build_post_data_json(payloads), row_ids, false)); + request_futures.push_back(std::async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(payloads), row_ids, false)); // Reset accumulators row_ids.clear(); @@ -190,7 +214,7 @@ void Emitter::do_send(list *event_rows, list 0) { - request_futures.push_back(std::async(HttpClient::http_post, this->m_url, this->build_post_data_json(payloads), row_ids, false)); + request_futures.push_back(std::async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(payloads), row_ids, false)); } } diff --git a/src/emitter.hpp b/src/emitter.hpp index 1a915e3..468f17e 100644 --- a/src/emitter.hpp +++ b/src/emitter.hpp @@ -25,14 +25,15 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "storage.hpp" #include "payload.hpp" #include "self_describing_json.hpp" -#include "http_client.hpp" -#include "http_request_result.hpp" #include "cracked_url.hpp" +#include "http/http_request_result.hpp" +#include "http/http_client.hpp" using std::string; using std::thread; using std::condition_variable; using std::mutex; +using std::unique_ptr; namespace snowplow { /** @@ -46,6 +47,10 @@ namespace snowplow { * 4. The emitter will send all of these events as determined by the Request, Protocol and ByteLimits * - Each request is sent in its thread. * 5. 
Once sent it will process the results of all the requests sent and will remove all successfully sent events from the database + * + * You may optionally configure the HTTP client to be used to make HTTP requests to the collector. + * This is done by passing a unique pointer to a class inheriting from `HttpClient` that the Emitter will take ownership of. + * If not configured, the Emitter will use the built-in `HttpClientWindows` on Windows, `HttpClientApple` on Apple operating systems, and `HttpClientCurl` on other Unix systems. */ class Emitter { public: @@ -67,10 +72,10 @@ class Emitter { /** * @brief Construct a new Emitter object - * + * * The `db_name` can be any valid path on your host file system (that can be created with the current user). * By default it will create the required files wherever the application is being run from. - * + * * @param uri The URI to send events to * @param method The request type to use (GET or POST) * @param protocol The protocol to use (http or https) @@ -81,6 +86,24 @@ class Emitter { */ Emitter(const string & uri, Method method, Protocol protocol, int send_limit, int byte_limit_post, int byte_limit_get, const string & db_name); + + /** + * @brief Construct a new Emitter object with a custom HTTP client + * + * The `db_name` can be any valid path on your host file system (that can be created with the current user). + * By default it will create the required files wherever the application is being run from. 
+ * + * @param uri The URI to send events to + * @param method The request type to use (GET or POST) + * @param protocol The protocol to use (http or https) + * @param send_limit The maximum amount of events to send at a time + * @param byte_limit_post The byte limit when sending a POST request + * @param byte_limit_get The byte limit when sending a GET request + * @param db_name Defines the path and file name of the database + * @param http_client Unique pointer to a custom HTTP client to send GET and POST requests with + */ + Emitter(const string & uri, Method method, Protocol protocol, int send_limit, + int byte_limit_post, int byte_limit_get, const string & db_name, unique_ptr http_client); ~Emitter(); /** @@ -150,6 +173,7 @@ class Emitter { private: CrackedUrl m_url; Method m_method; + unique_ptr m_http_client; unsigned int m_send_limit; unsigned int m_byte_limit_get; unsigned int m_byte_limit_post; diff --git a/src/http/http_client.hpp b/src/http/http_client.hpp new file mode 100644 index 0000000..1c63570 --- /dev/null +++ b/src/http/http_client.hpp @@ -0,0 +1,46 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#ifndef HTTP_CLIENT_H +#define HTTP_CLIENT_H + +#include +#include "../cracked_url.hpp" +#include "http_request_result.hpp" + +using std::string; +using std::list; + +namespace snowplow { +/** + * @brief Abstract base class for HTTP client for making requests to Snowplow Collector. It is used by Emitter. + */ +class HttpClient { +public: + enum RequestMethod { POST, GET }; + + virtual ~HttpClient() {} + + HttpRequestResult http_post(const CrackedUrl url, const string &post_data, list row_ids, bool oversize) { + return http_request(POST, url, "", post_data, row_ids, oversize); + } + HttpRequestResult http_get(const CrackedUrl url, const string &query_string, list row_ids, bool oversize) { + return http_request(GET, url, query_string, "", row_ids, oversize); + } + +protected: + virtual HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & query_string, const string & post_data, list row_ids, bool oversize) = 0; +}; +} + +#endif diff --git a/src/http/http_client_apple.cpp b/src/http/http_client_apple.cpp new file mode 100644 index 0000000..4dada88 --- /dev/null +++ b/src/http/http_client_apple.cpp @@ -0,0 +1,96 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#if defined(__APPLE__) +#include "http_client_apple.hpp" +#include "../constants.hpp" + +#include +#include +#include + +using namespace snowplow; +using std::cerr; +using std::endl; +using std::lock_guard; + +const string HttpClientApple::TRACKER_AGENT = string("Snowplow C++ Tracker (macOS)"); + +HttpRequestResult HttpClientApple::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { + + // Get final url + string final_url = url.to_string(); + if (method == GET) { + final_url += "?" + query_string; + } + + // Create request + CFStringRef cf_url_str = CFStringCreateWithBytes(kCFAllocatorDefault, (const unsigned char *)final_url.c_str(), final_url.length(), kCFStringEncodingUTF8, false); + CFStringRef cf_content_type_str = CFStringCreateWithBytes(kCFAllocatorDefault, (const unsigned char *)SNOWPLOW_POST_CONTENT_TYPE.c_str(), SNOWPLOW_POST_CONTENT_TYPE.length(), kCFStringEncodingUTF8, false); + CFStringRef cf_user_agent_str = CFStringCreateWithBytes(kCFAllocatorDefault, (const unsigned char *)HttpClientApple::TRACKER_AGENT.c_str(), HttpClientApple::TRACKER_AGENT.length(), kCFStringEncodingUTF8, false); + + CFURLRef cf_url = CFURLCreateWithString(kCFAllocatorDefault, cf_url_str, NULL); + CFHTTPMessageRef cf_http_req; + + if (method == GET) { + cf_http_req = CFHTTPMessageCreateRequest(kCFAllocatorDefault, CFSTR("GET"), cf_url, kCFHTTPVersion1_1); + } else { + cf_http_req = CFHTTPMessageCreateRequest(kCFAllocatorDefault, CFSTR("POST"), cf_url, kCFHTTPVersion1_1); + CFDataRef cf_post_data = CFDataCreate(kCFAllocatorDefault, (const UInt8 *)post_data.data(), post_data.size()); + CFHTTPMessageSetBody(cf_http_req, cf_post_data); + if (cf_post_data) { + CFRelease(cf_post_data); + } + CFHTTPMessageSetHeaderFieldValue(cf_http_req, CFSTR("Content-Type"), cf_content_type_str); + } + CFHTTPMessageSetHeaderFieldValue(cf_http_req, CFSTR("User-Agent"), cf_user_agent_str); + 
CFHTTPMessageSetHeaderFieldValue(cf_http_req, CFSTR("Connection"), CFSTR("keep-alive")); + + CFReadStreamRef cf_read_stream = CFReadStreamCreateForHTTPRequest(kCFAllocatorDefault, cf_http_req); + CFMutableDataRef cf_data_resp = CFDataCreateMutable(kCFAllocatorDefault, 0); + + // Send request + CFReadStreamOpen(cf_read_stream); + CFIndex num_bytes_read; + do { + const int buff_size = 1024; + UInt8 buff[buff_size]; + num_bytes_read = CFReadStreamRead(cf_read_stream, buff, buff_size); + + if (num_bytes_read > 0) { + CFDataAppendBytes(cf_data_resp, buff, num_bytes_read); + } else if (num_bytes_read < 0) { + CFStreamError error = CFReadStreamGetError(cf_read_stream); + cerr << error.error << endl; + } + } while (num_bytes_read > 0); + + // Process result + CFHTTPMessageRef cf_http_resp = (CFHTTPMessageRef)CFReadStreamCopyProperty(cf_read_stream, kCFStreamPropertyHTTPResponseHeader); + int cf_status_code = CFHTTPMessageGetResponseStatusCode(cf_http_resp); + + // Release resources + CFReadStreamClose(cf_read_stream); + CFRelease(cf_url_str); + CFRelease(cf_content_type_str); + CFRelease(cf_user_agent_str); + CFRelease(cf_url); + CFRelease(cf_http_req); + CFRelease(cf_read_stream); + CFRelease(cf_data_resp); + CFRelease(cf_http_resp); + + return HttpRequestResult(0, cf_status_code, row_ids, oversize); +} + +#endif diff --git a/src/http/http_client_apple.hpp b/src/http/http_client_apple.hpp new file mode 100644 index 0000000..d18d992 --- /dev/null +++ b/src/http/http_client_apple.hpp @@ -0,0 +1,46 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef HTTP_CLIENT_APPLE_H +#define HTTP_CLIENT_APPLE_H +#if defined(__APPLE__) + +#include "http_client.hpp" + +#include + +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +using std::string; +using std::list; +using std::mutex; + +namespace snowplow { +/** + * @brief HTTP client for making requests to Snowplow Collector using Apple Core Foundation APIs. + * + * This HTTP client is only compatible with Apple operating systems. + */ +class HttpClientApple : public HttpClient { +public: + ~HttpClientApple() {} + + static const string TRACKER_AGENT; + +protected: + HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & query_string, const string & post_data, list row_ids, bool oversize); +}; +} + +#endif +#endif diff --git a/src/http/http_client_curl.cpp b/src/http/http_client_curl.cpp new file mode 100644 index 0000000..32bcb66 --- /dev/null +++ b/src/http/http_client_curl.cpp @@ -0,0 +1,78 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#if !defined(WIN32) && !defined(_WIN32) && !defined(__WIN32) || defined(__CYGWIN__) +#include "http_client_curl.hpp" +#include "../constants.hpp" +#include "curl/curl.h" + +using namespace snowplow; +using std::cerr; +using std::endl; +using std::lock_guard; + +HttpClientCurl::HttpClientCurl() { + curl_global_init(CURL_GLOBAL_ALL); +} + +HttpClientCurl::~HttpClientCurl() { + curl_global_cleanup(); +} + +const string HttpClientCurl::TRACKER_AGENT = string("Snowplow C++ Tracker (Unix)"); + +static size_t write_data(void *data, size_t byte_size, size_t n_bytes, std::string *body) { + return byte_size * n_bytes; +} + +HttpRequestResult HttpClientCurl::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { + CURL *curl = curl_easy_init(); + if (!curl) { return HttpRequestResult(1, -1, row_ids, oversize); } + + // create the request + std::ostringstream full_url_stream; + full_url_stream << url.to_string(); + + struct curl_slist *headers = NULL; + headers = curl_slist_append(headers, ("User-Agent: " + TRACKER_AGENT).c_str()); + headers = curl_slist_append(headers, "Connection: keep-alive"); + + if (method == GET) { + full_url_stream << '?' 
<< query_string; + curl_easy_setopt(curl, CURLOPT_HTTPGET, 1); + } else { + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, post_data.c_str()); + headers = curl_slist_append(headers, ("Content-Type: " + SNOWPLOW_POST_CONTENT_TYPE).c_str()); + } + + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + std::string full_url = full_url_stream.str(); + curl_easy_setopt(curl, CURLOPT_URL, full_url.c_str()); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); + + // send the request + CURLcode res = curl_easy_perform(curl); + long status_code; + if (res == CURLE_OK) { + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status_code); + } + + // cleanup + curl_easy_cleanup(curl); + curl_slist_free_all(headers); + + return HttpRequestResult(0, status_code, row_ids, oversize); +} + +#endif diff --git a/src/http/http_client_curl.hpp b/src/http/http_client_curl.hpp new file mode 100644 index 0000000..03de194 --- /dev/null +++ b/src/http/http_client_curl.hpp @@ -0,0 +1,47 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#ifndef HTTP_CLIENT_CURL_H +#define HTTP_CLIENT_CURL_H +#if !defined(WIN32) && !defined(_WIN32) && !defined(__WIN32) || defined(__CYGWIN__) + +#include "http_client.hpp" + +#include + +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +using std::string; +using std::list; +using std::mutex; + +namespace snowplow { +/** + * @brief HTTP client that uses the Curl library for making requests to Snowplow Collector. + * + * This HTTP client supports Unix systems with the curl library installed. + */ +class HttpClientCurl : public HttpClient { +public: + HttpClientCurl(); + ~HttpClientCurl(); + + static const string TRACKER_AGENT; + +protected: + HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & query_string, const string & post_data, list row_ids, bool oversize); +}; +} + +#endif +#endif diff --git a/src/http/http_client_windows.cpp b/src/http/http_client_windows.cpp new file mode 100644 index 0000000..adca715 --- /dev/null +++ b/src/http/http_client_windows.cpp @@ -0,0 +1,134 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) +#include "http_client_windows.hpp" + +using namespace snowplow; +using std::cerr; +using std::endl; + +const string HttpClientWindows::TRACKER_AGENT = string("Snowplow C++ Tracker (Win32)"); + +HttpRequestResult HttpClientWindows::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { + + HINTERNET h_internet = InternetOpen( + TEXT(HttpClientWindows::TRACKER_AGENT.c_str()), + INTERNET_OPEN_TYPE_DIRECT, + NULL, + NULL, + 0); + + if (h_internet == NULL) { + return HttpRequestResult(GetLastError(), 0, row_ids, oversize); + } + + unsigned int use_port = url.get_port(); + if (url.get_use_default_port()) { + if (url.get_is_https()) { + use_port = INTERNET_DEFAULT_HTTPS_PORT; + } else { + use_port = INTERNET_DEFAULT_HTTP_PORT; + } + } + + HINTERNET h_connect = InternetConnect( + h_internet, + TEXT(url.get_hostname().c_str()), + use_port, + NULL, + NULL, + INTERNET_SERVICE_HTTP, + 0, + NULL); + + if (h_connect == NULL) { + InternetCloseHandle(h_internet); + return HttpRequestResult(GetLastError(), 0, row_ids, oversize); + } + + DWORD flags = 0 | INTERNET_FLAG_RELOAD; + if (url.get_is_https()) { + flags = flags | INTERNET_FLAG_SECURE; + } + + string final_path = url.get_path(); + string request_method_string; + LPVOID post_buf; + int post_buf_len; + if (method == GET) { + request_method_string = "GET"; + post_buf = NULL; + post_buf_len = 0; + final_path += "?" 
+ query_string; + } else { + request_method_string = "POST"; + post_buf = (LPVOID)TEXT(post_data.c_str()); + post_buf_len = strlen(TEXT(post_data.c_str())); + } + + HINTERNET h_request = HttpOpenRequest( + h_connect, + TEXT(request_method_string.c_str()), + TEXT(final_path.c_str()), + NULL, + NULL, + NULL, + flags, + 0); + + if (h_request == NULL) { + InternetCloseHandle(h_internet); + InternetCloseHandle(h_connect); + return HttpRequestResult(GetLastError(), 0, row_ids, oversize); + } + + LPCSTR hdrs = TEXT("Content-Type: application/json; charset=utf-8"); + BOOL is_sent = HttpSendRequest(h_request, hdrs, strlen(hdrs), post_buf, post_buf_len); + + if (!is_sent) { + InternetCloseHandle(h_internet); + InternetCloseHandle(h_connect); + InternetCloseHandle(h_request); + return HttpRequestResult(GetLastError(), 0, row_ids, oversize); + } + + string response; + const int buf_len = 1024; + char buff[buf_len]; + + BOOL is_more = true; + DWORD bytes_read = -1; + + while (is_more && bytes_read != 0) { + is_more = InternetReadFile(h_request, buff, buf_len, &bytes_read); + response.append(buff, bytes_read); + } + + DWORD http_status_code = 0; + DWORD length = sizeof(DWORD); + HttpQueryInfo( + h_request, + HTTP_QUERY_STATUS_CODE | HTTP_QUERY_FLAG_NUMBER, + &http_status_code, + &length, + NULL); + + InternetCloseHandle(h_request); + InternetCloseHandle(h_connect); + InternetCloseHandle(h_internet); + + return HttpRequestResult(0, http_status_code, row_ids, oversize); +} + +#endif diff --git a/src/http/http_client_windows.hpp b/src/http/http_client_windows.hpp new file mode 100644 index 0000000..a2bf03a --- /dev/null +++ b/src/http/http_client_windows.hpp @@ -0,0 +1,46 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. 
+You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef HTTP_CLIENT_WINDOWS_H +#define HTTP_CLIENT_WINDOWS_H +#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) + +#include +#include "http_client.hpp" + +#include +#include +#include + +#pragma comment (lib, "wininet.lib") + +using std::string; +using std::list; + +namespace snowplow { +/** + * @brief HTTP client for making requests to Snowplow Collector using Windows APIs. + * + * This HTTP client is only compatible with Windows. + */ +class HttpClientWindows : public HttpClient { +public: + static const string TRACKER_AGENT; + +protected: + HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & query_string, const string & post_data, list row_ids, bool oversize); +}; +} + +#endif +#endif diff --git a/src/http_request_result.cpp b/src/http/http_request_result.cpp similarity index 100% rename from src/http_request_result.cpp rename to src/http/http_request_result.cpp diff --git a/src/http_request_result.hpp b/src/http/http_request_result.hpp similarity index 100% rename from src/http_request_result.hpp rename to src/http/http_request_result.hpp diff --git a/src/http_client.cpp b/src/http_client.cpp deleted file mode 100644 index 9c39e95..0000000 --- a/src/http_client.cpp +++ /dev/null @@ -1,263 +0,0 @@ -/* -Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. - -This program is licensed to you under the Apache License Version 2.0, -and you may not use this file except in compliance with the Apache License Version 2.0. 
-You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. - -Unless required by applicable law or agreed to in writing, -software distributed under the Apache License Version 2.0 is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. -*/ - -#include "http_client.hpp" - -using namespace snowplow; -using std::cerr; -using std::endl; -using std::lock_guard; - -// --- Common - -HttpRequestResult HttpClient::http_post(const CrackedUrl url, const string &post_data, list row_ids, bool oversize) { - HttpRequestResult res = HttpClient::http_request(POST, url, "", post_data, row_ids, oversize); - return res; -} - -HttpRequestResult HttpClient::http_get(const CrackedUrl url, const string &query_string, list row_ids, bool oversize) { - HttpRequestResult res = HttpClient::http_request(GET, url, query_string, "", row_ids, oversize); - return res; -} - -// --- Testing - -#if defined(SNOWPLOW_TEST_SUITE) - -const string HttpClient::TRACKER_AGENT = string("Snowplow C++ Tracker (Integration tests)"); - -list HttpClient::requests_list; -mutex HttpClient::log_read_write; -int HttpClient::response_code = 200; - -HttpRequestResult HttpClient::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { - lock_guard guard(log_read_write); - - HttpClient::Request r; - r.method = method; - r.query_string = query_string; - r.post_data = post_data; - r.row_ids = row_ids; - r.oversize = oversize; - requests_list.push_back(r); - - return HttpRequestResult(0, response_code, row_ids, oversize); -} - -void HttpClient::set_http_response_code(int http_response_code) { - lock_guard guard(log_read_write); - response_code = http_response_code; -} - -list HttpClient::get_requests_list() { - lock_guard 
guard(log_read_write); - return requests_list; -} - -void HttpClient::reset() { - lock_guard guard(log_read_write); - requests_list.clear(); - response_code = 200; -} - -// --- Windows32 - -#elif defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) - -const string HttpClient::TRACKER_AGENT = string("Snowplow C++ Tracker (Win32)"); - -HttpRequestResult HttpClient::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { - - HINTERNET h_internet = InternetOpen( - TEXT(HttpClient::TRACKER_AGENT.c_str()), - INTERNET_OPEN_TYPE_DIRECT, - NULL, - NULL, - 0); - - if (h_internet == NULL) { - return HttpRequestResult(GetLastError(), 0, row_ids, oversize); - } - - unsigned int use_port = url.get_port(); - if (url.get_use_default_port()) { - if (url.get_is_https()) { - use_port = INTERNET_DEFAULT_HTTPS_PORT; - } else { - use_port = INTERNET_DEFAULT_HTTP_PORT; - } - } - - HINTERNET h_connect = InternetConnect( - h_internet, - TEXT(url.get_hostname().c_str()), - use_port, - NULL, - NULL, - INTERNET_SERVICE_HTTP, - 0, - NULL); - - if (h_connect == NULL) { - InternetCloseHandle(h_internet); - return HttpRequestResult(GetLastError(), 0, row_ids, oversize); - } - - DWORD flags = 0 | INTERNET_FLAG_RELOAD; - if (url.get_is_https()) { - flags = flags | INTERNET_FLAG_SECURE; - } - - string final_path = url.get_path(); - string request_method_string; - LPVOID post_buf; - int post_buf_len; - if (method == GET) { - request_method_string = "GET"; - post_buf = NULL; - post_buf_len = 0; - final_path += "?" 
+ query_string; - } else { - request_method_string = "POST"; - post_buf = (LPVOID)TEXT(post_data.c_str()); - post_buf_len = strlen(TEXT(post_data.c_str())); - } - - HINTERNET h_request = HttpOpenRequest( - h_connect, - TEXT(request_method_string.c_str()), - TEXT(final_path.c_str()), - NULL, - NULL, - NULL, - flags, - 0); - - if (h_request == NULL) { - InternetCloseHandle(h_internet); - InternetCloseHandle(h_connect); - return HttpRequestResult(GetLastError(), 0, row_ids, oversize); - } - - LPCSTR hdrs = TEXT("Content-Type: application/json; charset=utf-8"); - BOOL is_sent = HttpSendRequest(h_request, hdrs, strlen(hdrs), post_buf, post_buf_len); - - if (!is_sent) { - InternetCloseHandle(h_internet); - InternetCloseHandle(h_connect); - InternetCloseHandle(h_request); - return HttpRequestResult(GetLastError(), 0, row_ids, oversize); - } - - string response; - const int buf_len = 1024; - char buff[buf_len]; - - BOOL is_more = true; - DWORD bytes_read = -1; - - while (is_more && bytes_read != 0) { - is_more = InternetReadFile(h_request, buff, buf_len, &bytes_read); - response.append(buff, bytes_read); - } - - DWORD http_status_code = 0; - DWORD length = sizeof(DWORD); - HttpQueryInfo( - h_request, - HTTP_QUERY_STATUS_CODE | HTTP_QUERY_FLAG_NUMBER, - &http_status_code, - &length, - NULL); - - InternetCloseHandle(h_request); - InternetCloseHandle(h_connect); - InternetCloseHandle(h_internet); - - return HttpRequestResult(0, http_status_code, row_ids, oversize); -} - -// --- macOS - -#elif defined(__APPLE__) - -const string HttpClient::TRACKER_AGENT = string("Snowplow C++ Tracker (macOS)"); - -HttpRequestResult HttpClient::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { - - // Get final url - string final_url = url.to_string(); - if (method == GET) { - final_url += "?" 
+ query_string; - } - - // Create request - CFStringRef cf_url_str = CFStringCreateWithBytes(kCFAllocatorDefault, (const unsigned char *)final_url.c_str(), final_url.length(), kCFStringEncodingUTF8, false); - CFStringRef cf_content_type_str = CFStringCreateWithBytes(kCFAllocatorDefault, (const unsigned char *)SNOWPLOW_POST_CONTENT_TYPE.c_str(), SNOWPLOW_POST_CONTENT_TYPE.length(), kCFStringEncodingUTF8, false); - CFStringRef cf_user_agent_str = CFStringCreateWithBytes(kCFAllocatorDefault, (const unsigned char *)HttpClient::TRACKER_AGENT.c_str(), HttpClient::TRACKER_AGENT.length(), kCFStringEncodingUTF8, false); - - CFURLRef cf_url = CFURLCreateWithString(kCFAllocatorDefault, cf_url_str, NULL); - CFHTTPMessageRef cf_http_req; - - if (method == GET) { - cf_http_req = CFHTTPMessageCreateRequest(kCFAllocatorDefault, CFSTR("GET"), cf_url, kCFHTTPVersion1_1); - } else { - cf_http_req = CFHTTPMessageCreateRequest(kCFAllocatorDefault, CFSTR("POST"), cf_url, kCFHTTPVersion1_1); - CFDataRef cf_post_data = CFDataCreate(kCFAllocatorDefault, (const UInt8 *)post_data.data(), post_data.size()); - CFHTTPMessageSetBody(cf_http_req, cf_post_data); - if (cf_post_data) { - CFRelease(cf_post_data); - } - CFHTTPMessageSetHeaderFieldValue(cf_http_req, CFSTR("Content-Type"), cf_content_type_str); - } - CFHTTPMessageSetHeaderFieldValue(cf_http_req, CFSTR("User-Agent"), cf_user_agent_str); - CFHTTPMessageSetHeaderFieldValue(cf_http_req, CFSTR("Connection"), CFSTR("keep-alive")); - - CFReadStreamRef cf_read_stream = CFReadStreamCreateForHTTPRequest(kCFAllocatorDefault, cf_http_req); - CFMutableDataRef cf_data_resp = CFDataCreateMutable(kCFAllocatorDefault, 0); - - // Send request - CFReadStreamOpen(cf_read_stream); - CFIndex num_bytes_read; - do { - const int buff_size = 1024; - UInt8 buff[buff_size]; - num_bytes_read = CFReadStreamRead(cf_read_stream, buff, buff_size); - - if (num_bytes_read > 0) { - CFDataAppendBytes(cf_data_resp, buff, num_bytes_read); - } else if (num_bytes_read < 0) { - 
CFStreamError error = CFReadStreamGetError(cf_read_stream); - cerr << error.error << endl; - } - } while (num_bytes_read > 0); - - // Process result - CFHTTPMessageRef cf_http_resp = (CFHTTPMessageRef)CFReadStreamCopyProperty(cf_read_stream, kCFStreamPropertyHTTPResponseHeader); - int cf_status_code = CFHTTPMessageGetResponseStatusCode(cf_http_resp); - - // Release resources - CFReadStreamClose(cf_read_stream); - CFRelease(cf_url_str); - CFRelease(cf_content_type_str); - CFRelease(cf_user_agent_str); - CFRelease(cf_url); - CFRelease(cf_http_req); - CFRelease(cf_read_stream); - CFRelease(cf_data_resp); - CFRelease(cf_http_resp); - - return HttpRequestResult(0, cf_status_code, row_ids, oversize); -} - -#endif diff --git a/test/emitter_test.cpp b/test/emitter_test.cpp index 17efbb2..414b290 100644 --- a/test/emitter_test.cpp +++ b/test/emitter_test.cpp @@ -12,10 +12,12 @@ See the Apache License Version 2.0 for the specific language governing permissio */ #include "../src/emitter.hpp" +#include "http/test_http_client.hpp" #include "catch.hpp" using namespace snowplow; using std::invalid_argument; +using std::unique_ptr; TEST_CASE("emitter") { SECTION("Emitter rejects urls (starting with http:// or https://)") { @@ -25,25 +27,25 @@ TEST_CASE("emitter") { bool inv_arg_https_case = false; try { - Emitter emitter("http://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter("http://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_http = true; } try { - Emitter emitter("https://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter("https://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); } catch (invalid_argument) 
{ inv_arg_https = true; } try { - Emitter emitter("HTTP://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter("HTTP://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_http_case = true; } try { - Emitter emitter("HTTPS://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter("HTTPS://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_https_case = true; } @@ -55,7 +57,7 @@ TEST_CASE("emitter") { } SECTION("Emitter setup confirmation") { - Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); REQUIRE(false == emitter.is_running()); REQUIRE("http://com.acme.collector/com.snowplowanalytics.snowplow/tp2" == emitter.get_cracked_url().to_string()); @@ -80,7 +82,7 @@ TEST_CASE("emitter") { emitter.flush(); REQUIRE(false == emitter.is_running()); - Emitter emitter_1("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter_1("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); REQUIRE(false == emitter_1.is_running()); REQUIRE("https://com.acme.collector/i" == emitter_1.get_cracked_url().to_string()); @@ -91,7 +93,7 @@ TEST_CASE("emitter") { bool inv_argument_empty_uri = false; try { - Emitter emitter_2("", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db"); + 
Emitter emitter_2("", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_argument_empty_uri = true; } @@ -99,17 +101,15 @@ TEST_CASE("emitter") { bool inv_argument_bad_url = false; try { - Emitter emitter_3("../:random../gibber", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db"); + Emitter emitter_3("../:random../gibber", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_argument_bad_url = true; } REQUIRE(inv_argument_bad_url == true); } -#if defined(SNOWPLOW_TEST_SUITE) - SECTION("Emitter should track and remove only successful events from the database for GET requests") { - Emitter e("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 52000, "test-emitter.db"); + Emitter e("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 52000, "test-emitter.db", unique_ptr(new TestHttpClient())); e.start(); Payload p; @@ -120,7 +120,7 @@ TEST_CASE("emitter") { } e.flush(); - list requests = HttpClient::get_requests_list(); + list requests = TestHttpClient::get_requests_list(); REQUIRE(0 != requests.size()); list *event_list = new list; @@ -128,7 +128,7 @@ TEST_CASE("emitter") { REQUIRE(0 == event_list->size()); event_list->clear(); - HttpClient::set_http_response_code(404); + TestHttpClient::set_http_response_code(404); e.start(); for (int i = 0; i < 10; i++) { @@ -141,12 +141,12 @@ TEST_CASE("emitter") { event_list->clear(); e.stop(); - HttpClient::reset(); + TestHttpClient::reset(); delete (event_list); } SECTION("Emitter should track and remove only successful events from the database for POST requests") { - Emitter e("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db"); + Emitter e("com.acme.collector", 
Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); e.start(); Payload p; @@ -157,7 +157,7 @@ TEST_CASE("emitter") { } e.flush(); - list requests = HttpClient::get_requests_list(); + list requests = TestHttpClient::get_requests_list(); REQUIRE(0 != requests.size()); list *event_list = new list; @@ -166,7 +166,7 @@ TEST_CASE("emitter") { event_list->clear(); // Test POST 404 response - HttpClient::set_http_response_code(404); + TestHttpClient::set_http_response_code(404); e.start(); for (int i = 0; i < 10; i++) { @@ -179,7 +179,7 @@ TEST_CASE("emitter") { event_list->clear(); e.stop(); - HttpClient::reset(); + TestHttpClient::reset(); // Test POST combination logic for (int i = 0; i < 1000; i++) { @@ -197,7 +197,7 @@ TEST_CASE("emitter") { p.add("tv", "pvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpv"); e.add(p); - HttpClient::set_http_response_code(404); + TestHttpClient::set_http_response_code(404); e.start(); e.flush(); @@ -206,9 +206,7 @@ TEST_CASE("emitter") { REQUIRE(0 == event_list->size()); event_list->clear(); - HttpClient::reset(); + TestHttpClient::reset(); delete (event_list); } - -#endif } diff --git a/test/http_client_test.cpp b/test/http/http_client_test.cpp similarity index 73% rename from test/http_client_test.cpp rename to test/http/http_client_test.cpp index 21fc603..a7eb505 100644 --- a/test/http_client_test.cpp +++ 
b/test/http/http_client_test.cpp @@ -11,62 +11,58 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ -#include "../src/http_client.hpp" -#include "catch.hpp" +#include "test_http_client.hpp" +#include "../catch.hpp" using namespace snowplow; -#if defined(SNOWPLOW_TEST_SUITE) - #define HTTP_TEST_URL_GET "http://com.acme.collector/i" #define HTTP_TEST_URL_POST "http://com.acme.collector/com.snowplowanalytics.snowplow/tp2" TEST_CASE("http_client") { SECTION("GET request to valid endpoint must return 200 code") { - HttpClient::reset(); + TestHttpClient::reset(); CrackedUrl c(HTTP_TEST_URL_GET); string query_string = "e=pv&tv=cpp-0.1.0"; list id_list{1}; - HttpRequestResult r = HttpClient::http_get(c, query_string, id_list, false); + HttpRequestResult r = TestHttpClient().http_get(c, query_string, id_list, false); REQUIRE(r.get_http_response_code() == 200); REQUIRE(r.is_success() == true); REQUIRE(r.get_row_ids().size() == id_list.size()); - list requests_list = HttpClient::get_requests_list(); + list requests_list = TestHttpClient::get_requests_list(); REQUIRE(1 == requests_list.size()); - HttpClient::Request req = requests_list.front(); - REQUIRE(HttpClient::RequestMethod::GET == req.method); + TestHttpClient::Request req = requests_list.front(); + REQUIRE(TestHttpClient::RequestMethod::GET == req.method); REQUIRE(query_string == req.query_string); REQUIRE("" == req.post_data); REQUIRE(false == req.oversize); } SECTION("POST request to valid endpoint must return 200 code") { - HttpClient::reset(); + TestHttpClient::reset(); CrackedUrl c(HTTP_TEST_URL_POST); string json_string = "{\"schema\":\"iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-3\",\"data\":[{\"dtm\":\"1234567890123\"}]}"; list id_list{1}; - HttpRequestResult r = HttpClient::http_post(c, json_string, id_list, false); + HttpRequestResult r = 
TestHttpClient().http_post(c, json_string, id_list, false); REQUIRE(r.get_http_response_code() == 200); REQUIRE(r.is_success() == true); REQUIRE(r.get_row_ids().size() == id_list.size()); - list requests_list = HttpClient::get_requests_list(); + list requests_list = TestHttpClient::get_requests_list(); REQUIRE(1 == requests_list.size()); - HttpClient::Request req = requests_list.front(); - REQUIRE(HttpClient::RequestMethod::POST == req.method); + TestHttpClient::Request req = requests_list.front(); + REQUIRE(TestHttpClient::RequestMethod::POST == req.method); REQUIRE("" == req.query_string); REQUIRE(json_string == req.post_data); REQUIRE(false == req.oversize); } } - -#endif diff --git a/test/http_request_result_test.cpp b/test/http/http_request_result_test.cpp similarity index 96% rename from test/http_request_result_test.cpp rename to test/http/http_request_result_test.cpp index 4685700..257fe63 100644 --- a/test/http_request_result_test.cpp +++ b/test/http/http_request_result_test.cpp @@ -11,8 +11,8 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ -#include "../src/http_request_result.hpp" -#include "catch.hpp" +#include "../../src/http/http_request_result.hpp" +#include "../catch.hpp" using namespace snowplow; diff --git a/test/http/test_http_client.cpp b/test/http/test_http_client.cpp new file mode 100644 index 0000000..c9f456f --- /dev/null +++ b/test/http/test_http_client.cpp @@ -0,0 +1,55 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "test_http_client.hpp" + +using namespace snowplow; +using std::cerr; +using std::endl; +using std::lock_guard; + +const string TestHttpClient::TRACKER_AGENT = string("Snowplow C++ Tracker (Integration tests)"); + +list TestHttpClient::requests_list; +mutex TestHttpClient::log_read_write; +int TestHttpClient::response_code = 200; + +HttpRequestResult TestHttpClient::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { + lock_guard guard(log_read_write); + + TestHttpClient::Request r; + r.method = method; + r.query_string = query_string; + r.post_data = post_data; + r.row_ids = row_ids; + r.oversize = oversize; + requests_list.push_back(r); + + return HttpRequestResult(0, response_code, row_ids, oversize); +} + +void TestHttpClient::set_http_response_code(int http_response_code) { + lock_guard guard(log_read_write); + response_code = http_response_code; +} + +list TestHttpClient::get_requests_list() { + lock_guard guard(log_read_write); + return requests_list; +} + +void TestHttpClient::reset() { + lock_guard guard(log_read_write); + requests_list.clear(); + response_code = 200; +} diff --git a/src/http_client.hpp b/test/http/test_http_client.hpp similarity index 52% rename from src/http_client.hpp rename to test/http/test_http_client.hpp index 50d7b9d..26f569b 100644 --- a/src/http_client.hpp +++ b/test/http/test_http_client.hpp @@ -11,42 +11,13 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations 
there under. */ -#ifndef HTTP_CLIENT_H -#define HTTP_CLIENT_H +#ifndef HTTP_CLIENT_TEST_H +#define HTTP_CLIENT_TEST_H -#include -#include -#include -#include "constants.hpp" -#include "cracked_url.hpp" -#include "http_request_result.hpp" +#include "../../src/http/http_client.hpp" -#if defined(SNOWPLOW_TEST_SUITE) - -#include -#include #include -#elif defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) - -#include -#include -#include - -#pragma comment (lib, "wininet.lib") - -#elif defined(__APPLE__) - -#include -#include -#include -#include -#include - -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - -#endif - using std::string; using std::list; using std::mutex; @@ -55,15 +26,12 @@ namespace snowplow { /** * @brief HTTP client for making requests to Snowplow Collector. To be used internally within tracker only. */ -class HttpClient { +class TestHttpClient : public HttpClient { public: - enum RequestMethod { POST, GET }; - + ~TestHttpClient() {} + static const string TRACKER_AGENT; - static HttpRequestResult http_post(const CrackedUrl url, const string & post_data, list row_ids, bool oversize); - static HttpRequestResult http_get(const CrackedUrl url, const string & query_string, list row_ids, bool oversize); -#if defined(SNOWPLOW_TEST_SUITE) struct Request { Request(){}; RequestMethod method; @@ -80,10 +48,9 @@ class HttpClient { static void set_http_response_code(int http_response_code); static list get_requests_list(); static void reset(); -#endif -private: - static HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & query_string, const string & post_data, list row_ids, bool oversize); +protected: + HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & query_string, const string & post_data, list row_ids, bool oversize); }; } diff --git a/test/tracker_test.cpp b/test/tracker_test.cpp index f6a94d4..e3ab1f4 100644 --- 
a/test/tracker_test.cpp +++ b/test/tracker_test.cpp @@ -15,6 +15,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../include/json.hpp" #include "../src/emitter.hpp" #include "../src/tracker.hpp" +#include "http/test_http_client.hpp" #include "catch.hpp" using namespace snowplow; @@ -32,7 +33,7 @@ TEST_CASE("tracker") { vector m_payloads; public: - MockEmitter() : Emitter("com.acme", Emitter::Method::POST, Emitter::Protocol::HTTP, 0, 0, 0, "test-tracker.db") {} + MockEmitter() : Emitter("com.acme", Emitter::Method::POST, Emitter::Protocol::HTTP, 0, 0, 0, "test-tracker.db", unique_ptr(new TestHttpClient())) {} void start() { m_started = true; } void stop() { m_started = false; } void add(Payload payload) { m_payloads.push_back(payload); } From 7c318fdd3aaa0e2c13828e4cf97d808c8752c02c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Fri, 8 Apr 2022 10:19:04 +0200 Subject: [PATCH 02/35] Use a common Event base class for all event types (close #51) PR #52 --- README.md | 8 +- examples/main.cpp | 18 +- performance/logs.txt | 1 + performance/run.cpp | 18 +- snowplow-cpp-tracker-example.vcxproj | 20 +- snowplow-cpp-tracker-example.vcxproj.filters | 44 ++- snowplow-cpp-tracker.vcxproj | 25 +- snowplow-cpp-tracker.vcxproj.filters | 51 +++- src/client_session.hpp | 2 +- src/constants.hpp | 4 +- src/emitter.hpp | 4 +- src/events/event.cpp | 68 +++++ src/events/event.hpp | 91 ++++++ src/events/screen_view_event.cpp | 39 +++ src/events/screen_view_event.hpp | 50 ++++ src/events/self_describing_event.cpp | 23 ++ src/events/self_describing_event.hpp | 52 ++++ src/events/structured_event.cpp | 52 ++++ src/events/structured_event.hpp | 71 +++++ src/events/timing_event.cpp | 46 ++++ src/events/timing_event.hpp | 64 +++++ src/payload/event_payload.cpp | 34 +++ src/payload/event_payload.hpp | 55 ++++ src/{ => payload}/payload.cpp | 10 +- src/{ => payload}/payload.hpp | 10 +- src/{ => 
payload}/self_describing_json.cpp | 4 +- src/{ => payload}/self_describing_json.hpp | 10 +- src/storage.hpp | 2 +- src/subject.hpp | 2 +- src/tracker.cpp | 158 +---------- src/tracker.hpp | 260 +----------------- src/utils.hpp | 4 +- test/payload/event_payload_test.cpp | 33 +++ test/{ => payload}/payload_test.cpp | 7 +- .../self_describing_json_test.cpp | 4 +- test/tracker_test.cpp | 143 +++++----- 36 files changed, 955 insertions(+), 532 deletions(-) create mode 100644 src/events/event.cpp create mode 100644 src/events/event.hpp create mode 100644 src/events/screen_view_event.cpp create mode 100644 src/events/screen_view_event.hpp create mode 100644 src/events/self_describing_event.cpp create mode 100644 src/events/self_describing_event.hpp create mode 100644 src/events/structured_event.cpp create mode 100644 src/events/structured_event.hpp create mode 100644 src/events/timing_event.cpp create mode 100644 src/events/timing_event.hpp create mode 100644 src/payload/event_payload.cpp create mode 100644 src/payload/event_payload.hpp rename src/{ => payload}/payload.cpp (83%) rename src/{ => payload}/payload.hpp (88%) rename src/{ => payload}/self_describing_json.cpp (92%) rename src/{ => payload}/self_describing_json.hpp (90%) create mode 100644 test/payload/event_payload_test.cpp rename test/{ => payload}/payload_test.cpp (94%) rename test/{ => payload}/self_describing_json_test.cpp (93%) diff --git a/README.md b/README.md index 57cba35..358562a 100644 --- a/README.md +++ b/README.md @@ -48,14 +48,14 @@ Track custom events (see the documentation for the full list of supported event ```cpp // structured event -Tracker::StructuredEvent se("category", "action"); -tracker->track_struct_event(se); +StructuredEvent se("category", "action"); +tracker->track(se); // screen view event -Tracker::ScreenViewEvent sve; +ScreenViewEvent sve; string name = "Screen ID - 5asd56"; sve.name = &name; -tracker->track_screen_view(sve); +tracker->track(sve); ``` Check the tracked events in 
a [Snowplow Micro](https://docs.snowplowanalytics.com/docs/understanding-your-pipeline/what-is-snowplow-micro/) or [Snowplow Mini](https://docs.snowplowanalytics.com/docs/understanding-your-pipeline/what-is-snowplow-mini/) instance. diff --git a/examples/main.cpp b/examples/main.cpp index 8751ed3..593149a 100644 --- a/examples/main.cpp +++ b/examples/main.cpp @@ -3,11 +3,17 @@ #include #include "../src/tracker.hpp" +#include "../src/events/structured_event.hpp" +#include "../src/events/timing_event.hpp" +#include "../src/events/screen_view_event.hpp" using snowplow::ClientSession; using snowplow::Emitter; using snowplow::Subject; using snowplow::Tracker; +using snowplow::StructuredEvent; +using snowplow::ScreenViewEvent; +using snowplow::TimingEvent; using std::cout; using std::endl; using std::string; @@ -52,21 +58,21 @@ int main(int argc, char **argv) { time(&start); for (int i = 0; i < 2000; i++) { - Tracker::TimingEvent te("timing-cat", "timing-var", 123); + TimingEvent te("timing-cat", "timing-var", 123); - Tracker::ScreenViewEvent sve; + ScreenViewEvent sve; string name = "Screen ID - 5asd56"; sve.name = &name; - Tracker::StructuredEvent se("shop", "add-to-basket"); + StructuredEvent se("shop", "add-to-basket"); string property = "pcs"; double value = 25.6; se.property = &property; se.value = &value; - t->track_timing(te); - t->track_screen_view(sve); - t->track_struct_event(se); + t->track(te); + t->track(sve); + t->track(se); } time(&end); diff --git a/performance/logs.txt b/performance/logs.txt index aff86cd..1ed166c 100644 --- a/performance/logs.txt +++ b/performance/logs.txt @@ -5,3 +5,4 @@ {"desktop_context":{"data":{"deviceManufacturer":"Apple 
Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":5.169725125,"mocked_emitter_and_real_session":5.426304083,"mute_emitter_and_mocked_session":27.828702709,"mute_emitter_and_real_session":32.127459,"num_operations":10000,"num_threads":5},"timestamp":1645006143834,"tracker_version":"cpp-0.1.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":5.742478167,"mocked_emitter_and_real_session":5.050215375,"mute_emitter_and_mocked_session":25.8688495,"mute_emitter_and_real_session":19.645383417,"num_operations":10000,"num_threads":5},"timestamp":1645041474207,"tracker_version":"cpp-0.1.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.493122875,"mocked_emitter_and_real_session":6.679922959,"mute_emitter_and_mocked_session":18.915125958,"mute_emitter_and_real_session":17.245701792,"num_operations":10000,"num_threads":5},"timestamp":1648794323772,"tracker_version":"cpp-0.2.0"} +{"desktop_context":{"data":{"deviceManufacturer":"Apple 
Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.519524667,"mocked_emitter_and_real_session":6.452258541,"mute_emitter_and_mocked_session":21.564418917,"mute_emitter_and_real_session":18.91465825,"num_operations":10000,"num_threads":5},"timestamp":1649249418985,"tracker_version":"cpp-0.2.0"} diff --git a/performance/run.cpp b/performance/run.cpp index 56c934e..88d56f5 100644 --- a/performance/run.cpp +++ b/performance/run.cpp @@ -16,6 +16,9 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../src/subject.hpp" #include "../src/tracker.hpp" +#include "../src/events/structured_event.hpp" +#include "../src/events/timing_event.hpp" +#include "../src/events/screen_view_event.hpp" #include "mock_client_session.hpp" #include "mock_emitter.hpp" #include "mute_emitter.hpp" @@ -26,6 +29,9 @@ using snowplow::Emitter; using snowplow::Storage; using snowplow::Subject; using snowplow::Tracker; +using snowplow::ScreenViewEvent; +using snowplow::StructuredEvent; +using snowplow::TimingEvent; using std::vector; using std::chrono::duration; using std::chrono::high_resolution_clock; @@ -36,21 +42,21 @@ void track_events() { Tracker *tracker = Tracker::instance(); for (int i = 0; i < NUM_OPERATIONS; i++) { - Tracker::TimingEvent te("timing-cat", "timing-var", 123); + TimingEvent te("timing-cat", "timing-var", 123); - Tracker::ScreenViewEvent sve; + ScreenViewEvent sve; string name = "Screen ID - 5asd56"; sve.name = &name; - Tracker::StructuredEvent se("shop", "add-to-basket"); + StructuredEvent se("shop", "add-to-basket"); string property = "pcs"; double value = 25.6; se.property = &property; se.value = &value; - tracker->track_timing(te); - tracker->track_screen_view(sve); - tracker->track_struct_event(se); + 
tracker->track(te); + tracker->track(sve); + tracker->track(se); } } diff --git a/snowplow-cpp-tracker-example.vcxproj b/snowplow-cpp-tracker-example.vcxproj index 0110a9e..67094cc 100644 --- a/snowplow-cpp-tracker-example.vcxproj +++ b/snowplow-cpp-tracker-example.vcxproj @@ -105,8 +105,14 @@ - - + + + + + + + + @@ -123,8 +129,14 @@ - - + + + + + + + + diff --git a/snowplow-cpp-tracker-example.vcxproj.filters b/snowplow-cpp-tracker-example.vcxproj.filters index 7c7f955..3a0959b 100644 --- a/snowplow-cpp-tracker-example.vcxproj.filters +++ b/snowplow-cpp-tracker-example.vcxproj.filters @@ -30,10 +30,28 @@ Source Files - + Source Files - + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + Source Files @@ -80,12 +98,30 @@ Header Files - + Header Files - + Header Files + + Header Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + Header Files diff --git a/snowplow-cpp-tracker.vcxproj b/snowplow-cpp-tracker.vcxproj index c4588ef..6b2e992 100644 --- a/snowplow-cpp-tracker.vcxproj +++ b/snowplow-cpp-tracker.vcxproj @@ -109,8 +109,14 @@ - - + + + + + + + + @@ -122,8 +128,9 @@ - - + + + @@ -140,8 +147,14 @@ - - + + + + + + + + diff --git a/snowplow-cpp-tracker.vcxproj.filters b/snowplow-cpp-tracker.vcxproj.filters index 74939d1..865f56c 100644 --- a/snowplow-cpp-tracker.vcxproj.filters +++ b/snowplow-cpp-tracker.vcxproj.filters @@ -30,10 +30,28 @@ Source Files - + Source Files - + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + Source Files @@ -75,10 +93,13 @@ Source Files - + + Source Files + + Source Files - + Source Files @@ -116,12 +137,30 @@ Header Files - + + Header Files + + Header Files - + Header Files + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + Header Files diff --git a/src/client_session.hpp b/src/client_session.hpp index 
4a8b4a3..319837e 100644 --- a/src/client_session.hpp +++ b/src/client_session.hpp @@ -16,7 +16,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include -#include "self_describing_json.hpp" +#include "payload/self_describing_json.hpp" #include "../include/json.hpp" using std::string; diff --git a/src/constants.hpp b/src/constants.hpp index 1da4939..c243805 100644 --- a/src/constants.hpp +++ b/src/constants.hpp @@ -33,14 +33,12 @@ const string SNOWPLOW_GET_PROTOCOL_PATH = "i"; const string SNOWPLOW_SCHEMA_PAYLOAD_DATA = "iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-4"; const string SNOWPLOW_SCHEMA_CONTEXTS = "iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-1"; const string SNOWPLOW_SCHEMA_UNSTRUCT_EVENT = "iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0"; -const string SNOWPLOW_SCHEMA_SCREEN_VIEW = "iglu:com.snowplowanalytics.snowplow/screen_view/jsonschema/1-0-0"; -const string SNOWPLOW_SCHEMA_USER_TIMINGS = "iglu:com.snowplowanalytics.snowplow/timing/jsonschema/1-0-0"; const string SNOWPLOW_SCHEMA_CLIENT_SESSION = "iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-0-1"; const string SNOWPLOW_SCHEMA_DESKTOP_CONTEXT = "iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"; // event types const string SNOWPLOW_EVENT_STRUCTURED = "se"; -const string SNOWPLOW_EVENT_UNSTRUCTURED = "ue"; +const string SNOWPLOW_EVENT_SELF_DESCRIBING = "ue"; const string SNOWPLOW_EVENT_PAGE_VIEW = "pv"; const string SNOWPLOW_CONTEXT = "co"; diff --git a/src/emitter.hpp b/src/emitter.hpp index 468f17e..8f67cbc 100644 --- a/src/emitter.hpp +++ b/src/emitter.hpp @@ -23,8 +23,8 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "constants.hpp" #include "utils.hpp" #include "storage.hpp" -#include "payload.hpp" -#include "self_describing_json.hpp" +#include "payload/payload.hpp" +#include "payload/self_describing_json.hpp" 
#include "cracked_url.hpp" #include "http/http_request_result.hpp" #include "http/http_client.hpp" diff --git a/src/events/event.cpp b/src/events/event.cpp new file mode 100644 index 0000000..c917e25 --- /dev/null +++ b/src/events/event.cpp @@ -0,0 +1,68 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "event.hpp" +#include "../utils.hpp" + +using namespace snowplow; +using std::to_string; +using std::invalid_argument; + +Event::Event() { + this->m_true_timestamp = NULL; +} + +// --- Getters + +EventPayload Event::get_payload(bool use_base64) const { + EventPayload p = get_custom_event_payload(use_base64); + + if (!p.get().count(SNOWPLOW_EVENT)) { + throw invalid_argument("Missing event type"); + } + + auto *true_timestamp = get_true_timestamp(); + if (true_timestamp != NULL) { + p.add(SNOWPLOW_TRUE_TIMESTAMP, to_string(*true_timestamp)); + } + + return p; +} + +EventPayload Event::get_self_describing_event_payload(const SelfDescribingJson &event, bool use_base64) const { + EventPayload p; + p.add(SNOWPLOW_EVENT, SNOWPLOW_EVENT_SELF_DESCRIBING); + + SelfDescribingJson sdj(SNOWPLOW_SCHEMA_UNSTRUCT_EVENT, event.get()); + p.add_json(sdj.get(), use_base64, SNOWPLOW_UNSTRUCTURED_ENCODED, SNOWPLOW_UNSTRUCTURED); + + return p; +} + +vector Event::get_context() const { + return m_context; +} + +unsigned long long 
*Event::get_true_timestamp() const { + return m_true_timestamp; +} + +// --- Setters + +void Event::set_true_timestamp(unsigned long long *true_timestamp) { + m_true_timestamp = true_timestamp; +} + +void Event::set_context(const vector &context) { + m_context = context; +} diff --git a/src/events/event.hpp b/src/events/event.hpp new file mode 100644 index 0000000..93e108c --- /dev/null +++ b/src/events/event.hpp @@ -0,0 +1,91 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef EVENT_H +#define EVENT_H + +#include "../payload/self_describing_json.hpp" +#include "../payload/event_payload.hpp" +#include +#include + +using std::string; +using std::vector; + +namespace snowplow { +/** + * @brief Base class for all event types that concrete event types inherit from. + */ +class Event { +public: + /** + * @brief Construct a new Event object + */ + Event(); + + /** + * @return vector Custom event context + */ + vector get_context() const; + + /** + * @return unsigned long long* Pointer to user-defined Unix timestamp or NULL if not set + * + * Overrides automatically assigned timestamp. + */ + unsigned long long *get_true_timestamp() const; + + /** + * @brief Replace the custom context of the event with a new vector of self-describing JSONs. 
+ * + * @param context New custom context + */ + void set_context(const vector &context); + + /** + * @brief Set the user-defined Unix timestamp (in ms) to the given pointer or NULL. + + * The true timestamp overrides the automatically assigned device timestamp. + * + * @param true_timestamp Pointer to true timestamp or NULL + */ + void set_true_timestamp(unsigned long long *true_timestamp); + +protected: + /** + * @brief This function is overriden by concrete event classes and returns payload with properties for the event types. + * + * @param use_base64 Whether to enable base 64 encoding for self-describing event body + * @return EventPayload Payload with the custom properties for the event type + */ + virtual EventPayload get_custom_event_payload(bool use_base64) const = 0; + + /** + * @brief Helper function to construct payload for a self-describing event given the self-describing JSON. + * + * @param event Self-describing JSON with the event schema and data + * @param use_base64 Whether to enable base 64 encoding for self-describing event body + * @return EventPayload Event payload + */ + EventPayload get_self_describing_event_payload(const SelfDescribingJson &event, bool use_base64) const; + +private: + unsigned long long *m_true_timestamp; + vector m_context; + EventPayload get_payload(bool use_base64) const; + + friend class Tracker; +}; +} // namespace snowplow + +#endif diff --git a/src/events/screen_view_event.cpp b/src/events/screen_view_event.cpp new file mode 100644 index 0000000..2047848 --- /dev/null +++ b/src/events/screen_view_event.cpp @@ -0,0 +1,39 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "screen_view_event.hpp" + +using namespace snowplow; +using std::invalid_argument; + +ScreenViewEvent::ScreenViewEvent() { + this->id = NULL; + this->name = NULL; +} + +EventPayload ScreenViewEvent::get_custom_event_payload(bool use_base64) const { + if (name == NULL && id == NULL) { + throw invalid_argument("Either name or id field must be set"); + } + + json data; + if (id != NULL) { + data[SNOWPLOW_SV_ID] = *id; + } + if (name != NULL) { + data[SNOWPLOW_SV_NAME] = *name; + } + + SelfDescribingJson event = SelfDescribingJson(SNOWPLOW_SCHEMA_SCREEN_VIEW, data); + return get_self_describing_event_payload(event, use_base64); +} diff --git a/src/events/screen_view_event.hpp b/src/events/screen_view_event.hpp new file mode 100644 index 0000000..71409e3 --- /dev/null +++ b/src/events/screen_view_event.hpp @@ -0,0 +1,50 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#ifndef SCREEN_VIEW_EVENT_H +#define SCREEN_VIEW_EVENT_H + +#include "event.hpp" + +namespace snowplow { + +const string SNOWPLOW_SCHEMA_SCREEN_VIEW = "iglu:com.snowplowanalytics.snowplow/screen_view/jsonschema/1-0-0"; + +/** + * @brief Event to track user viewing a screen within the application. + * + * Schema for the event: iglu:com.snowplowanalytics.snowplow/screen_view/jsonschema/1-0-0 + */ +class ScreenViewEvent : public Event { +public: + /** + * @brief Construct a new Screen View Event object + */ + ScreenViewEvent(); + + /** + * @brief The name of the screen viewed. + */ + string *name; + + /** + * @brief The id of screen that was viewed. + */ + string *id; + +protected: + EventPayload get_custom_event_payload(bool use_base64) const override; +}; +} // namespace snowplow + +#endif diff --git a/src/events/self_describing_event.cpp b/src/events/self_describing_event.cpp new file mode 100644 index 0000000..d0b1209 --- /dev/null +++ b/src/events/self_describing_event.cpp @@ -0,0 +1,23 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#include "self_describing_event.hpp" + +using namespace snowplow; + +SelfDescribingEvent::SelfDescribingEvent(const SelfDescribingJson &event) : event(event) { +} + +EventPayload SelfDescribingEvent::get_custom_event_payload(bool use_base64) const { + return get_self_describing_event_payload(event, use_base64); +} diff --git a/src/events/self_describing_event.hpp b/src/events/self_describing_event.hpp new file mode 100644 index 0000000..53bcd99 --- /dev/null +++ b/src/events/self_describing_event.hpp @@ -0,0 +1,52 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef SELF_DESCRIBING_EVENT_H +#define SELF_DESCRIBING_EVENT_H + +#include "event.hpp" + +namespace snowplow { +/** + * @brief Event to track custom information that does not fit into the out-of-the box events. + * + * Self-describing events are a [data structure based on JSON Schemas](https://docs.snowplowanalytics.com/docs/understanding-tracking-design/understanding-schemas-and-validation/) + * and can have arbitrarily many fields. 
+ * To define your own custom self-describing event, you must create a JSON schema for that + * event and upload it to an [Iglu Schema Repository](https://github.com/snowplow/iglu) using + * [igluctl](https://docs.snowplowanalytics.com/docs/open-source-components-and-applications/iglu/) + * (or if a Snowplow BDP customer, you can use the + * [Snowplow BDP Console UI](https://docs.snowplowanalytics.com/docs/understanding-tracking-design/managing-data-structures/) + * or [Data Structures API](https://docs.snowplowanalytics.com/docs/understanding-tracking-design/managing-data-structures-via-the-api-2/)). + * Snowplow uses the schema to validate that the JSON containing the event properties is well-formed. + */ +class SelfDescribingEvent : public Event { +public: + /** + * @brief Construct a new Self Describing Event object + * + * @param event Main properties of the self-describing event including its schema and body + */ + SelfDescribingEvent(const SelfDescribingJson &event); + + /** + * @brief Main properties of the self-describing event including its schema and body + */ + SelfDescribingJson event; // required + +protected: + EventPayload get_custom_event_payload(bool use_base64) const override; +}; +} // namespace snowplow + +#endif diff --git a/src/events/structured_event.cpp b/src/events/structured_event.cpp new file mode 100644 index 0000000..d9f6861 --- /dev/null +++ b/src/events/structured_event.cpp @@ -0,0 +1,52 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "structured_event.hpp" + +using namespace snowplow; +using std::invalid_argument; +using std::to_string; + +StructuredEvent::StructuredEvent(const string &category, const string &action) { + this->category = category; + this->action = action; + this->label = NULL; + this->property = NULL; + this->value = NULL; +} + +EventPayload StructuredEvent::get_custom_event_payload(bool use_base64) const { + if (action == "") { + throw invalid_argument("Action is required"); + } + if (category == "") { + throw invalid_argument("Category is required"); + } + + EventPayload p; + p.add(SNOWPLOW_EVENT, SNOWPLOW_EVENT_STRUCTURED); + p.add(SNOWPLOW_SE_ACTION, action); + p.add(SNOWPLOW_SE_CATEGORY, category); + + if (label != NULL) { + p.add(SNOWPLOW_SE_LABEL, *label); + } + if (property != NULL) { + p.add(SNOWPLOW_SE_PROPERTY, *property); + } + if (value != NULL) { + p.add(SNOWPLOW_SE_VALUE, to_string(*value)); + } + + return p; +} diff --git a/src/events/structured_event.hpp b/src/events/structured_event.hpp new file mode 100644 index 0000000..5960682 --- /dev/null +++ b/src/events/structured_event.hpp @@ -0,0 +1,71 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef STRUCTURED_EVENT_H +#define STRUCTURED_EVENT_H + +#include "event.hpp" + +namespace snowplow { +/** + * @brief Event to capture custom consumer interactions without the need to define a custom schema. + */ +class StructuredEvent : public Event { +public: + /** + * @brief Construct a new Structured Event object + * + * @param category Name for the group of objects you want to track e.g. "media", "ecomm". + * @param action Defines the type of user interaction for the web object. + */ + StructuredEvent(const string &category, const string &action); + + /** + * @brief Name for the group of objects you want to track e.g. "media", "ecomm". + */ + string category; // required + + /** + * @brief Defines the type of user interaction for the web object. + * + * E.g., "play-video", "add-to-basket". + */ + string action; // required + + /** + * @brief Identifies the specific object being actioned. + * + * E.g., ID of the video being played, or the SKU or the product added-to-basket. + */ + string *label; + + /** + * @brief Describes the object or the action performed on it. + * + * This might be the quantity of an item added to basket + */ + string *property; + + /** + * @brief Quantifies or further describes the user action. + * + * This might be the price of an item added-to-basket, or the starting time of the video where play was just pressed. 
+ */ + double *value; + +protected: + EventPayload get_custom_event_payload(bool use_base64) const override; +}; +} // namespace snowplow + +#endif diff --git a/src/events/timing_event.cpp b/src/events/timing_event.cpp new file mode 100644 index 0000000..ed80324 --- /dev/null +++ b/src/events/timing_event.cpp @@ -0,0 +1,46 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#include "timing_event.hpp" + +using namespace snowplow; +using std::invalid_argument; + +TimingEvent::TimingEvent(const string &category, const string &variable, unsigned long long timing) { + this->category = category; + this->variable = variable; + this->timing = timing; + this->label = NULL; +} + +EventPayload TimingEvent::get_custom_event_payload(bool use_base64) const { + if (category == "") { + throw invalid_argument("Category is required"); + } + if (variable == "") { + throw invalid_argument("Variable is required"); + } + + json data; + data[SNOWPLOW_UT_CATEGORY] = category; + data[SNOWPLOW_UT_VARIABLE] = variable; + data[SNOWPLOW_UT_TIMING] = timing; + + if (label != NULL) { + data[SNOWPLOW_UT_LABEL] = *label; + } + + SelfDescribingJson sdj = SelfDescribingJson(SNOWPLOW_SCHEMA_USER_TIMINGS, data); + + return get_self_describing_event_payload(sdj, use_base64); +} diff --git a/src/events/timing_event.hpp b/src/events/timing_event.hpp new file mode 100644 index 0000000..bc13546 --- /dev/null +++ b/src/events/timing_event.hpp @@ -0,0 +1,64 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#ifndef TIMING_H +#define TIMING_H + +#include "event.hpp" + +namespace snowplow { + +const string SNOWPLOW_SCHEMA_USER_TIMINGS = "iglu:com.snowplowanalytics.snowplow/timing/jsonschema/1-0-0"; + +/** + * @brief Event used to track user timing events such as how long resources take to load. + * + * Schema: iglu:com.snowplowanalytics.snowplow/timing/jsonschema/1-0-0 + */ +class TimingEvent : public Event { +public: + /** + * @brief Construct a new Timing Event object + * + * @param category Defines the timing category. + * @param variable Defines the timing variable measured. + * @param timing Represents the time. + */ + TimingEvent(const string &category, const string &variable, unsigned long long timing); + + /** + * @brief Defines the timing category. + */ + string category; // required + + /** + * @brief Defines the timing variable measured. + */ + string variable; // required + + /** + * @brief Represents the time. + */ + unsigned long long timing; // required + + /** + * @brief An optional string to further identify the timing event. + */ + string *label; + +protected: + EventPayload get_custom_event_payload(bool use_base64) const override; +}; +} // namespace snowplow + +#endif diff --git a/src/payload/event_payload.cpp b/src/payload/event_payload.cpp new file mode 100644 index 0000000..24ba507 --- /dev/null +++ b/src/payload/event_payload.cpp @@ -0,0 +1,34 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "event_payload.hpp" +#include "../utils.hpp" + +using namespace snowplow; +using std::to_string; + +EventPayload::EventPayload() { + this->m_event_id = Utils::get_uuid4(); + this->m_timestamp = Utils::get_unix_epoch_ms(); + + add(SNOWPLOW_TIMESTAMP, to_string(m_timestamp)); + add(SNOWPLOW_EID, m_event_id); +} + +string EventPayload::get_event_id() const { + return m_event_id; +} + +unsigned long long EventPayload::get_timestamp() const { + return m_timestamp; +} diff --git a/src/payload/event_payload.hpp b/src/payload/event_payload.hpp new file mode 100644 index 0000000..3bcb27e --- /dev/null +++ b/src/payload/event_payload.hpp @@ -0,0 +1,55 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef TRACKER_PAYLOAD_H +#define TRACKER_PAYLOAD_H + +#include "payload.hpp" +#include + +using std::string; + +namespace snowplow { +/** + * @brief Payload with event properties that is created for tracked events. + * + * In contrast with the base `Payload` class, `EventPayload` contains event ID and device timestamp properties. + */ +class EventPayload : public Payload { +public: + /** + * @brief Construct a new Event Payload and initializes the event ID and device timestamp. 
+ */ + EventPayload(); + + /** + * @brief Get the event ID + * + * @return string Automatically generated event ID + */ + string get_event_id() const; + + /** + * @brief Get the device created timestamp + * + * @return unsigned long long Automatically assigned Unix timestamp + */ + unsigned long long get_timestamp() const; + +private: + unsigned long long m_timestamp; + string m_event_id; +}; +} // namespace snowplow + +#endif diff --git a/src/payload.cpp b/src/payload/payload.cpp similarity index 83% rename from src/payload.cpp rename to src/payload/payload.cpp index cd6ae7a..2f70163 100644 --- a/src/payload.cpp +++ b/src/payload/payload.cpp @@ -12,8 +12,10 @@ See the Apache License Version 2.0 for the specific language governing permissio */ #include "payload.hpp" +#include "../utils.hpp" using namespace snowplow; +using std::to_string; Payload::~Payload() { this->m_pairs.clear(); @@ -32,11 +34,11 @@ void Payload::add_map(map pairs) { } } -void Payload::add_payload(Payload p) { +void Payload::add_payload(const Payload &p) { this->add_map(p.get()); } -void Payload::add_json(json j, bool base64Encode, const string &encoded, const string ¬_encoded) { +void Payload::add_json(const json &j, bool base64Encode, const string &encoded, const string ¬_encoded) { if (base64Encode) { string json_str = j.dump(); this->add(encoded, base64_encode((const unsigned char *)json_str.c_str(), json_str.length())); @@ -45,6 +47,6 @@ void Payload::add_json(json j, bool base64Encode, const string &encoded, const s } } -map Payload::get() { - return this->m_pairs; +map Payload::get() const { + return m_pairs; } diff --git a/src/payload.hpp b/src/payload/payload.hpp similarity index 88% rename from src/payload.hpp rename to src/payload/payload.hpp index 9fca91d..f67ff9d 100644 --- a/src/payload.hpp +++ b/src/payload/payload.hpp @@ -14,8 +14,8 @@ See the Apache License Version 2.0 for the specific language governing permissio #ifndef PAYLOAD_H #define PAYLOAD_H -#include 
"../include/base64.hpp" -#include "../include/json.hpp" +#include "../../include/base64.hpp" +#include "../../include/json.hpp" #include #include @@ -54,7 +54,7 @@ class Payload { * * @param p Payload to add values from */ - void add_payload(Payload p); + void add_payload(const Payload &p); /** * @brief Add self-describing JSON data to the payload. @@ -64,14 +64,14 @@ class Payload { * @param encoded Key for encoded data * @param not_encoded Key for not-encoded data */ - void add_json(json j, bool base64Encode, const string &encoded, const string ¬_encoded); + void add_json(const json &j, bool base64Encode, const string &encoded, const string ¬_encoded); /** * @brief Get the payload key-value pairs. * * @return Payload as key-value pairs */ - map get(); + map get() const; }; } // namespace snowplow diff --git a/src/self_describing_json.cpp b/src/payload/self_describing_json.cpp similarity index 92% rename from src/self_describing_json.cpp rename to src/payload/self_describing_json.cpp index 4671744..0cc0fd5 100644 --- a/src/self_describing_json.cpp +++ b/src/payload/self_describing_json.cpp @@ -26,10 +26,10 @@ SelfDescribingJson::~SelfDescribingJson() { this->m_json.clear(); } -json SelfDescribingJson::get() { +json SelfDescribingJson::get() const { return this->m_json; } -string SelfDescribingJson::to_string() { +string SelfDescribingJson::to_string() const { return this->m_json.dump(); } diff --git a/src/self_describing_json.hpp b/src/payload/self_describing_json.hpp similarity index 90% rename from src/self_describing_json.hpp rename to src/payload/self_describing_json.hpp index 6c2b35e..87d46db 100644 --- a/src/self_describing_json.hpp +++ b/src/payload/self_describing_json.hpp @@ -15,8 +15,8 @@ See the Apache License Version 2.0 for the specific language governing permissio #define SELF_DESCRIBING_JSON_H #include -#include "constants.hpp" -#include "../include/json.hpp" +#include "../constants.hpp" +#include "../../include/json.hpp" using std::string; using 
json = nlohmann::json; @@ -36,7 +36,7 @@ class SelfDescribingJson { * @param schema Iglu schema (e.g., "iglu:com.snowplowanalytics.snowplow/timing/jsonschema/1-0-0") * @param data Data payload with unstructured set of properties */ - SelfDescribingJson(const string & schema, const json & data); + SelfDescribingJson(const string &schema, const json &data); /** * @brief Destroy the Self Describing Json object @@ -48,14 +48,14 @@ class SelfDescribingJson { * * @return json Content as a JSON object */ - json get(); + json get() const; /** * @brief Return the content of the self-describing JSON as string. * * @return string Content as a JSON string */ - string to_string(); + string to_string() const; }; } diff --git a/src/storage.hpp b/src/storage.hpp index 06a4f7e..c2a5dc1 100644 --- a/src/storage.hpp +++ b/src/storage.hpp @@ -21,7 +21,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include #include "utils.hpp" -#include "payload.hpp" +#include "payload/payload.hpp" #include "../include/sqlite3.h" #include "../include/json.hpp" diff --git a/src/subject.hpp b/src/subject.hpp index ac40198..c7a61c9 100644 --- a/src/subject.hpp +++ b/src/subject.hpp @@ -16,7 +16,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include -#include "payload.hpp" +#include "payload/payload.hpp" #include "constants.hpp" using std::map; diff --git a/src/tracker.cpp b/src/tracker.cpp index 4a83883..af9e160 100644 --- a/src/tracker.cpp +++ b/src/tracker.cpp @@ -92,7 +92,10 @@ void Tracker::set_subject(Subject *subject) { // --- Event Tracking -void Tracker::track(Payload payload, const string &event_id, vector &contexts) { +string Tracker::track(const Event &event) { + EventPayload payload = event.get_payload(m_use_base64); + vector context = event.get_context(); + // Add standard KV Pairs payload.add(SNOWPLOW_TRACKER_VERSION, SNOWPLOW_TRACKER_VERSION_LABEL); payload.add(SNOWPLOW_PLATFORM, 
this->m_platform); @@ -106,19 +109,19 @@ void Tracker::track(Payload payload, const string &event_id, vectorm_client_session) { - contexts.push_back(this->m_client_session->update_and_get_session_context(event_id)); + context.push_back(this->m_client_session->update_and_get_session_context(payload.get_event_id())); } // Add Desktop Context if available if (this->m_desktop_context) { - contexts.push_back(Utils::get_desktop_context()); + context.push_back(Utils::get_desktop_context()); } // Build the final context and add it to the payload - if (contexts.size() > 0) { + if (context.size() > 0) { json context_data_array; - for (int i = 0; i < contexts.size(); ++i) { - context_data_array.push_back(contexts[i].get()); + for (int i = 0; i < context.size(); ++i) { + context_data_array.push_back(context[i].get()); } SelfDescribingJson context_json(SNOWPLOW_SCHEMA_CONTEXTS, context_data_array); payload.add_json(context_json.get(), m_use_base64, SNOWPLOW_CONTEXT_ENCODED, SNOWPLOW_CONTEXT); @@ -126,147 +129,6 @@ void Tracker::track(Payload payload, const string &event_id, vectorm_emitter.add(payload); -} - -void Tracker::track_struct_event(StructuredEvent se) { - if (se.action == "") { - throw invalid_argument("Action is required"); - } - if (se.category == "") { - throw invalid_argument("Category is required"); - } - - Payload p; - p.add(SNOWPLOW_EVENT, SNOWPLOW_EVENT_STRUCTURED); - p.add(SNOWPLOW_SE_ACTION, se.action); - p.add(SNOWPLOW_SE_CATEGORY, se.category); - - if (se.label != NULL) { - p.add(SNOWPLOW_SE_LABEL, *se.label); - } - if (se.property != NULL) { - p.add(SNOWPLOW_SE_PROPERTY, *se.property); - } - if (se.value != NULL) { - p.add(SNOWPLOW_SE_VALUE, to_string(*se.value)); - } - - p.add(SNOWPLOW_TIMESTAMP, to_string(se.timestamp)); - p.add(SNOWPLOW_EID, se.event_id); - - if (se.true_timestamp != NULL) { - p.add(SNOWPLOW_TRUE_TIMESTAMP, to_string(*se.true_timestamp)); - } - - track(p, se.event_id, se.contexts); -} - -void 
Tracker::track_screen_view(Tracker::ScreenViewEvent sve) { - if (sve.name == NULL && sve.id == NULL) { - throw invalid_argument("Either name or id field must be set"); - } - - json data; - - if (sve.id != NULL) { - data[SNOWPLOW_SV_ID] = *sve.id; - } - if (sve.name != NULL) { - data[SNOWPLOW_SV_NAME] = *sve.name; - } - - SelfDescribingJson sdj = SelfDescribingJson(SNOWPLOW_SCHEMA_SCREEN_VIEW, data); - - SelfDescribingEvent sde(sdj); - sde.event_id = sve.event_id; - sde.timestamp = sve.timestamp; - sde.true_timestamp = sve.true_timestamp; - sde.contexts = sve.contexts; - - track_self_describing_event(sde); -} - -void Tracker::track_timing(TimingEvent te) { - if (te.category == "") { - throw invalid_argument("Category is required"); - } - if (te.variable == "") { - throw invalid_argument("Variable is required"); - } - - json data; - data[SNOWPLOW_UT_CATEGORY] = te.category; - data[SNOWPLOW_UT_VARIABLE] = te.variable; - data[SNOWPLOW_UT_TIMING] = te.timing; - - if (te.label != NULL) { - data[SNOWPLOW_UT_LABEL] = *te.label; - } - - SelfDescribingJson sdj = SelfDescribingJson(SNOWPLOW_SCHEMA_USER_TIMINGS, data); - - SelfDescribingEvent sde(sdj); - sde.event_id = te.event_id; - sde.timestamp = te.timestamp; - sde.true_timestamp = te.true_timestamp; - sde.contexts = te.contexts; - - track_self_describing_event(sde); -} - -void Tracker::track_self_describing_event(SelfDescribingEvent sde) { - Payload p; - p.add(SNOWPLOW_EVENT, SNOWPLOW_EVENT_UNSTRUCTURED); - p.add(SNOWPLOW_TIMESTAMP, to_string(sde.timestamp)); - p.add(SNOWPLOW_EID, sde.event_id); - - SelfDescribingJson sdj(SNOWPLOW_SCHEMA_UNSTRUCT_EVENT, sde.event.get()); - p.add_json(sdj.get(), this->m_use_base64, SNOWPLOW_UNSTRUCTURED_ENCODED, SNOWPLOW_UNSTRUCTURED); - - if (sde.true_timestamp != NULL) { - p.add(SNOWPLOW_TRUE_TIMESTAMP, to_string(*sde.true_timestamp)); - } - - track(p, sde.event_id, sde.contexts); -} - -// --- Event Builders - -Tracker::StructuredEvent::StructuredEvent(string category, string action) { - 
this->category = category; - this->action = action; - this->contexts = vector(); - this->event_id = Utils::get_uuid4(); - this->timestamp = Utils::get_unix_epoch_ms(); - this->true_timestamp = NULL; - this->label = NULL; - this->property = NULL; - this->value = NULL; -} - -Tracker::SelfDescribingEvent::SelfDescribingEvent(SelfDescribingJson event) : event(event) { - this->event_id = Utils::get_uuid4(); - this->timestamp = Utils::get_unix_epoch_ms(); - this->true_timestamp = NULL; - this->contexts = vector(); -} - -Tracker::ScreenViewEvent::ScreenViewEvent() { - this->contexts = vector(); - this->event_id = Utils::get_uuid4(); - this->timestamp = Utils::get_unix_epoch_ms(); - this->true_timestamp = NULL; - this->id = NULL; - this->name = NULL; -} -Tracker::TimingEvent::TimingEvent(string category, string variable, unsigned long long timing) { - this->category = category; - this->variable = variable; - this->timestamp = Utils::get_unix_epoch_ms(); - this->true_timestamp = NULL; - this->timing = timing; - this->contexts = vector(); - this->event_id = Utils::get_uuid4(); - this->label = NULL; + return payload.get_event_id(); } diff --git a/src/tracker.hpp b/src/tracker.hpp index 1c310dd..4e42dc5 100644 --- a/src/tracker.hpp +++ b/src/tracker.hpp @@ -18,7 +18,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "emitter.hpp" #include "subject.hpp" #include "client_session.hpp" -#include "self_describing_json.hpp" +#include "events/event.hpp" using std::string; using std::vector; @@ -57,231 +57,6 @@ class Tracker { */ static void close(); - /** - * @brief Event to capture custom consumer interactions without the need to define a custom schema. - */ - class StructuredEvent { - public: - /** - * @brief Name for the group of objects you want to track e.g. "media", "ecomm". - */ - string category; // required - - /** - * @brief Defines the type of user interaction for the web object. - * - * E.g., "play-video", "add-to-basket". 
- */ - string action; // required - - /** - * @brief Identifies the specific object being actioned. - * - * E.g., ID of the video being played, or the SKU or the product added-to-basket. - */ - string *label; - - /** - * @brief Describes the object or the action performed on it. - * - * This might be the quantity of an item added to basket - */ - string *property; - - /** - * @brief Quantifies or further describes the user action. - * - * This might be the price of an item added-to-basket, or the starting time of the video where play was just pressed. - */ - double *value; - - /** - * @brief Unix timestamp (in ms) when the event was created. Assigned automatically. - * @deprecated Use the `true_timestamp` instead. - */ - unsigned long long timestamp; - - /** - * @brief ID of the event (UUID v4) that is assigned automatically. - * @deprecated The ability to set custom event ID will be removed in the future - */ - string event_id; - - /** - * @brief Optional, user-defined Unix timestamp (in ms) for the event to override the automatically assigned one. - */ - unsigned long long *true_timestamp; - - /** - * @brief Context entities added to the event. - * - */ - vector contexts; - - /** - * @brief Construct a new Structured Event object - * - * @param category Name for the group of objects you want to track e.g. "media", "ecomm". - * @param action Defines the type of user interaction for the web object. - */ - StructuredEvent(string category, string action); - }; - - /** - * @brief Event to track custom information that does not fit into the out-of-the box events. - * - * Self-describing events are a [data structure based on JSON Schemas](https://docs.snowplowanalytics.com/docs/understanding-tracking-design/understanding-schemas-and-validation/) - * and can have arbitrarily many fields. 
- * To define your own custom self-describing event, you must create a JSON schema for that - * event and upload it to an [Iglu Schema Repository](https://github.com/snowplow/iglu) using - * [igluctl](https://docs.snowplowanalytics.com/docs/open-source-components-and-applications/iglu/) - * (or if a Snowplow BDP customer, you can use the - * [Snowplow BDP Console UI](https://docs.snowplowanalytics.com/docs/understanding-tracking-design/managing-data-structures/) - * or [Data Structures API](https://docs.snowplowanalytics.com/docs/understanding-tracking-design/managing-data-structures-via-the-api-2/)). - * Snowplow uses the schema to validate that the JSON containing the event properties is well-formed. - */ - class SelfDescribingEvent { - public: - /** - * @brief Main properties of the self-describing event including it's schema and body - */ - SelfDescribingJson event; // required - - /** - * @brief Unix timestamp (in ms) when the event was created. Assigned automatically. - * @deprecated Use the `true_timestamp` instead. - */ - unsigned long long timestamp; - - /** - * @brief ID of the event (UUID v4) that is assigned automatically. - * @deprecated The ability to set custom event ID will be removed in the future - */ - string event_id; - - /** - * @brief Optional, user-defined Unix timestamp (in ms) for the event to override the automatically assigned one. - */ - unsigned long long *true_timestamp; - - /** - * @brief Context entities added to the event. - */ - vector contexts; - - /** - * @brief Construct a new Self Describing Event object - * - * @param event Main properties of the self-describing event including it's schema and body - */ - SelfDescribingEvent(SelfDescribingJson event); - }; - - /** - * @brief Event to track user viewing a screen within the application. - * - * Schema for the event: iglu:com.snowplowanalytics.snowplow/screen_view/jsonschema/1-0-0 - */ - class ScreenViewEvent { - public: - /** - * @brief The name of the screen viewed. 
- */ - string *name; - - /** - * @brief The id of screen that was viewed. - */ - string *id; - - /** - * @brief Unix timestamp (in ms) when the event was created. Assigned automatically. - * @deprecated Use the `true_timestamp` instead. - */ - unsigned long long timestamp; - - /** - * @brief ID of the event (UUID v4) that is assigned automatically. - * @deprecated The ability to set custom event ID will be removed in the future - */ - string event_id; - - /** - * @brief Optional, user-defined Unix timestamp (in ms) for the event to override the automatically assigned one. - */ - unsigned long long *true_timestamp; - - /** - * @brief Context entities added to the event. - */ - vector contexts; - - /** - * @brief Construct a new Screen View Event object - */ - ScreenViewEvent(); - }; - - /** - * @brief Event used to track user timing events such as how long resources take to load. - * - * Schema: iglu:com.snowplowanalytics.snowplow/timing/jsonschema/1-0-0 - */ - class TimingEvent { - public: - /** - * @brief Defines the timing category. - */ - string category; // required - - /** - * @brief Defines the timing variable measured. - */ - string variable; // required - - /** - * @brief Represents the time. - */ - unsigned long long timing; // required - - /** - * @brief An optional string to further identify the timing event. - */ - string *label; - - /** - * @brief Unix timestamp (in ms) when the event was created. Assigned automatically. - * @deprecated Use the `true_timestamp` instead. - */ - unsigned long long timestamp; - - /** - * @brief ID of the event (UUID v4) that is assigned automatically. - * @deprecated The ability to set custom event ID will be removed in the future - */ - string event_id; - - /** - * @brief Optional, user-defined Unix timestamp (in ms) for the event to override the automatically assigned one. - */ - unsigned long long *true_timestamp; - - /** - * @brief Context entities added to the event. 
- * - */ - vector contexts; - - /** - * @brief Construct a new Timing Event object - * - * @param category Defines the timing category. - * @param variable Defines the timing variable measured. - * @param timing Represents the time. - */ - TimingEvent(string category, string variable, unsigned long long timing); - }; - void start(); void stop(); @@ -300,33 +75,16 @@ class Tracker { void set_subject(Subject *subject); /** - * @brief Track en event with custom payload. We do not recommend using this function. Instead, track events using the predefined functions for each event. + * @brief Track a Snowplow event (e.g., an instance of `SelfDescribingEvent`, or `ScreenViewEvent`). * - * @param p Event payload - * @param event_id ID of the event - * @param contexts Vector of custom contexts - */ - void track(Payload p, const string & event_id, vector &contexts); - - /** - * @brief Track a Snowplow custom structured event which fits the Google Analytics-style structure of having up to five fields. - */ - void track_struct_event(StructuredEvent); - - /** - * @brief Track the user viewing a screen within the application. - */ - void track_screen_view(ScreenViewEvent); - - /** - * @brief Track a timing event. - */ - void track_timing(TimingEvent); - - /** - * @brief Track a Snowplow custom unstructured event. + * A Payload object will be created from the event. + * This is passed to the configured Emitter. + * The payload's event ID string (a UUID) is returned. 
+ * + * @param event The event to track + * @return Tracked event ID */ - void track_self_describing_event(SelfDescribingEvent); + string track(const Event &event); private: static Tracker *m_instance; diff --git a/src/utils.hpp b/src/utils.hpp index 9188ab5..ac122c6 100644 --- a/src/utils.hpp +++ b/src/utils.hpp @@ -24,8 +24,8 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include #include -#include "self_describing_json.hpp" -#include "payload.hpp" +#include "payload/self_describing_json.hpp" +#include "payload/payload.hpp" #include "../include/json.hpp" #if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) diff --git a/test/payload/event_payload_test.cpp b/test/payload/event_payload_test.cpp new file mode 100644 index 0000000..33408b9 --- /dev/null +++ b/test/payload/event_payload_test.cpp @@ -0,0 +1,33 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
+*/ + +#include "../../src/payload/event_payload.hpp" +#include "../../src/utils.hpp" +#include "../catch.hpp" +#include +#include + +using namespace snowplow; + +TEST_CASE("event payload") { + EventPayload pl; + + SECTION("is initialized with event ID and timestamp") { + REQUIRE(pl.get().size() == 2); + unsigned long long time_now = Utils::get_unix_epoch_ms(); + REQUIRE(pl.get_timestamp() > (time_now - 1000)); + REQUIRE(pl.get_timestamp() < (time_now + 1000)); + REQUIRE(pl.get()["eid"] == pl.get_event_id()); + REQUIRE(pl.get()["dtm"] == std::to_string(pl.get_timestamp())); + } +} diff --git a/test/payload_test.cpp b/test/payload/payload_test.cpp similarity index 94% rename from test/payload_test.cpp rename to test/payload/payload_test.cpp index 69cf23c..a4e273d 100644 --- a/test/payload_test.cpp +++ b/test/payload/payload_test.cpp @@ -11,9 +11,10 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ -#include "../include/json.hpp" -#include "../src/payload.hpp" -#include "catch.hpp" +#include "../../include/json.hpp" +#include "../../src/payload/payload.hpp" +#include "../../src/utils.hpp" +#include "../catch.hpp" #include #include diff --git a/test/self_describing_json_test.cpp b/test/payload/self_describing_json_test.cpp similarity index 93% rename from test/self_describing_json_test.cpp rename to test/payload/self_describing_json_test.cpp index 911b470..223bad0 100644 --- a/test/self_describing_json_test.cpp +++ b/test/payload/self_describing_json_test.cpp @@ -11,8 +11,8 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
*/ -#include "../src/self_describing_json.hpp" -#include "catch.hpp" +#include "../../src/payload/self_describing_json.hpp" +#include "../catch.hpp" using namespace snowplow; diff --git a/test/tracker_test.cpp b/test/tracker_test.cpp index e3ab1f4..90e8d07 100644 --- a/test/tracker_test.cpp +++ b/test/tracker_test.cpp @@ -15,6 +15,10 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../include/json.hpp" #include "../src/emitter.hpp" #include "../src/tracker.hpp" +#include "../src/events/structured_event.hpp" +#include "../src/events/screen_view_event.hpp" +#include "../src/events/self_describing_event.hpp" +#include "../src/events/timing_event.hpp" #include "http/test_http_client.hpp" #include "catch.hpp" @@ -78,6 +82,28 @@ TEST_CASE("tracker") { Tracker::close(); } + SECTION("Tracker returns unique event ID") { + MockEmitter e; + ClientSession cs("test-tracker.db", 5000, 5000); + string platform = "pc"; + string app_id = "snowplow-test-suite"; + string name_space = "snowplow-testing"; + bool base64 = false; + bool desktop_context = false; + + Tracker *t = Tracker::init(e, NULL, &cs, &platform, &app_id, &name_space, &base64, &desktop_context); + + StructuredEvent sv("hello", "world"); + string sv_id_1 = t->track(sv); + REQUIRE(sv_id_1.size() > 5); + + string sv_id_2 = t->track(sv); + REQUIRE(sv_id_2.size() > 5); + REQUIRE(sv_id_1 != sv_id_2); + + Tracker::close(); + } + SECTION("Tracker controls should provide expected behaviour") { MockEmitter e; ClientSession cs("test-tracker.db", 5000, 5000); @@ -89,8 +115,8 @@ TEST_CASE("tracker") { Tracker *t = Tracker::init(e, NULL, &cs, &platform, &app_id, &name_space, &base64, &desktop_context); - Tracker::StructuredEvent sv("hello", "world"); - t->track_struct_event(sv); + StructuredEvent sv("hello", "world"); + t->track(sv); vector payloads = e.get_added_payloads(); REQUIRE(payloads.size() == 1); @@ -107,7 +133,7 @@ TEST_CASE("tracker") { Subject s; s.set_screen_resolution(1920, 
1080); Tracker::instance()->set_subject(&s); - t->track_struct_event(sv); + t->track(sv); payloads = e.get_added_payloads(); REQUIRE(payloads.size() == 2); @@ -124,7 +150,7 @@ TEST_CASE("tracker") { s.set_screen_resolution(1080, 1920); Tracker::instance()->set_subject(&s); - t->track_struct_event(sv); + t->track(sv); payloads = e.get_added_payloads(); REQUIRE(payloads.size() == 3); @@ -153,9 +179,7 @@ TEST_CASE("tracker") { REQUIRE(e.is_started() == true); - vector v; - Payload p; - t->track(p, "eid", v); + t->track(StructuredEvent("c", "a")); vector payloads = e.get_added_payloads(); REQUIRE(payloads.size() == 1); @@ -182,9 +206,7 @@ TEST_CASE("tracker") { REQUIRE(e.is_started() == true); - vector v; - Payload p; - t->track(p, "eid", v); + t->track(StructuredEvent("c", "a")); vector payloads = e.get_added_payloads(); REQUIRE(payloads.size() == 1); @@ -202,70 +224,54 @@ TEST_CASE("tracker") { // --- Event Builders SECTION("StructuredEvents have appropriate defaults") { - unsigned long long time_now = Utils::get_unix_epoch_ms(); - Tracker::StructuredEvent s("category", "action"); + StructuredEvent s("category", "action"); REQUIRE(s.category == "category"); REQUIRE(s.action == "action"); - REQUIRE(s.contexts.size() == 0); - REQUIRE(s.event_id.size() > 5); + REQUIRE(s.get_context().size() == 0); REQUIRE(s.label == NULL); - REQUIRE(s.timestamp > (time_now - 1000)); - REQUIRE(s.timestamp < (time_now + 1000)); - REQUIRE(s.true_timestamp == NULL); + REQUIRE(s.get_true_timestamp() == NULL); REQUIRE(s.value == NULL); } SECTION("SelfDescribingEvents have appropriate defaults") { - unsigned long long time_now = Utils::get_unix_epoch_ms(); SelfDescribingJson e = SelfDescribingJson("abc", "{\"hello\": \"world\"}"_json); - Tracker::SelfDescribingEvent sde(e); + SelfDescribingEvent sde(e); REQUIRE(sde.event.to_string() == e.to_string()); - REQUIRE(sde.contexts.size() == 0); - REQUIRE(sde.event_id.size() > 5); - REQUIRE(sde.timestamp > time_now - 1000); - REQUIRE(sde.timestamp < 
time_now + 1000); - REQUIRE(sde.true_timestamp == NULL); + REQUIRE(sde.get_context().size() == 0); + REQUIRE(sde.get_true_timestamp() == NULL); } SECTION("ScreenViewEvents have appropriate defaults") { - unsigned long long time_now = Utils::get_unix_epoch_ms(); - Tracker::ScreenViewEvent sve; - REQUIRE(sve.contexts.size() == 0); - REQUIRE(sve.event_id.size() > 5); + ScreenViewEvent sve; + REQUIRE(sve.get_context().size() == 0); REQUIRE(sve.id == NULL); REQUIRE(sve.name == NULL); - REQUIRE(sve.timestamp > time_now - 1000); - REQUIRE(sve.timestamp < time_now + 1000); - REQUIRE(sve.true_timestamp == NULL); + REQUIRE(sve.get_true_timestamp() == NULL); } SECTION("TimingEvents have appropriate defaults") { - unsigned long long time_now = Utils::get_unix_epoch_ms(); - Tracker::TimingEvent t("cat", "variable", 123); + TimingEvent t("cat", "variable", 123); REQUIRE(t.category == "cat"); REQUIRE(t.variable == "variable"); - REQUIRE(t.timestamp > time_now - 1000); - REQUIRE(t.timestamp < time_now + 1000); - REQUIRE(t.true_timestamp == NULL); + REQUIRE(t.get_true_timestamp() == NULL); REQUIRE(t.label == NULL); REQUIRE(t.timing == 123); - REQUIRE(t.contexts.size() == 0); - REQUIRE(t.event_id.size() > 5); + REQUIRE(t.get_context().size() == 0); } // --- Event Tracker Functions - SECTION("track_struct_event generates sane event") { + SECTION("track StructuredEvent generates sane event") { bool is_arg_exception_empty_category; bool is_arg_exception_empty_action; MockEmitter e; Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - Tracker::StructuredEvent sv("", "hello"); + StructuredEvent sv("", "hello"); try { - t->track_struct_event(sv); + t->track(sv); } catch (invalid_argument) { is_arg_exception_empty_category = true; } @@ -274,7 +280,7 @@ TEST_CASE("tracker") { sv.category = "hello"; try { - t->track_struct_event(sv); + t->track(sv); } catch (invalid_argument) { is_arg_exception_empty_action = true; } @@ -286,7 +292,7 @@ TEST_CASE("tracker") { sv.action 
= "action"; sv.category = "category"; - t->track_struct_event(sv); + t->track(sv); REQUIRE(e.get_added_payloads().size() == 1); @@ -303,8 +309,10 @@ TEST_CASE("tracker") { REQUIRE(payload[SNOWPLOW_EID].size() > 5); REQUIRE(payload.find(SNOWPLOW_TRUE_TIMESTAMP) == payload.end()); - sv.contexts = vector(); - sv.contexts.push_back(SelfDescribingJson("hello", "{\"hello\":\"world\"}"_json)); + + vector context; + context.push_back(SelfDescribingJson("hello", "{\"hello\":\"world\"}"_json)); + sv.set_context(context); string label = "label"; sv.label = &label; string property = "property"; @@ -312,14 +320,17 @@ TEST_CASE("tracker") { double value = 11.11; sv.value = &value; unsigned long long ts = Utils::get_unix_epoch_ms(); - sv.true_timestamp = &ts; + sv.set_true_timestamp(&ts); - t->track_struct_event(sv); + t->track(sv); auto new_payload = e.get_added_payloads()[1].get(); REQUIRE(new_payload[SNOWPLOW_TIMESTAMP].size() > 10); REQUIRE(new_payload[SNOWPLOW_EID].size() > 5); REQUIRE(new_payload[SNOWPLOW_TIMESTAMP].size() > 10); + unsigned long long timestamp = std::stoull(new_payload[SNOWPLOW_TIMESTAMP]); + REQUIRE(timestamp > (ts - 1000)); + REQUIRE(timestamp < (ts + 1000)); REQUIRE(new_payload[SNOWPLOW_SE_LABEL] == "label"); REQUIRE(new_payload[SNOWPLOW_SE_PROPERTY] == "property"); REQUIRE(new_payload[SNOWPLOW_SE_VALUE] == to_string(11.11)); @@ -328,14 +339,14 @@ TEST_CASE("tracker") { Tracker::close(); } - SECTION("track_screen_view generates sane event") { + SECTION("track ScreenViewEvent generates sane event") { MockEmitter e; Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - Tracker::ScreenViewEvent se; + ScreenViewEvent se; string id = "123"; se.id = &id; - t->track_screen_view(se); + t->track(se); REQUIRE(e.get_added_payloads().size() == 1); auto payload = e.get_added_payloads()[0].get(); @@ -362,9 +373,9 @@ TEST_CASE("tracker") { string name = "name"; se.name = &name; unsigned long long ttm = Utils::get_unix_epoch_ms(); - 
se.true_timestamp = &ttm; + se.set_true_timestamp(&ttm); - t->track_screen_view(se); + t->track(se); auto new_payload = e.get_added_payloads()[1].get(); REQUIRE(new_payload[SNOWPLOW_TRUE_TIMESTAMP] == to_string(ttm)); @@ -383,7 +394,7 @@ TEST_CASE("tracker") { se.name = NULL; bool arg_exception_on_no_id_or_name = false; try { - t->track_screen_view(se); + t->track(se); } catch (invalid_argument) { arg_exception_on_no_id_or_name = true; } @@ -393,12 +404,12 @@ TEST_CASE("tracker") { Tracker::close(); } - SECTION("track_timing generates a sane event") { + SECTION("track TimingEvent generates a sane event") { MockEmitter e; Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - Tracker::TimingEvent te("category", "variable", 123); - t->track_timing(te); + TimingEvent te("category", "variable", 123); + t->track(te); REQUIRE(e.get_added_payloads().size() == 1); @@ -427,9 +438,9 @@ TEST_CASE("tracker") { string label = "hello world"; te.label = &label; unsigned long long ts = Utils::get_unix_epoch_ms(); - te.true_timestamp = &ts; + te.set_true_timestamp(&ts); - t->track_timing(te); + t->track(te); expected[SNOWPLOW_UT_LABEL] = "hello world"; auto new_payload = e.get_added_payloads()[1].get(); @@ -442,20 +453,20 @@ TEST_CASE("tracker") { REQUIRE(base64_decode(new_payload[SNOWPLOW_UNSTRUCTURED_ENCODED]) == json_w_label); - Tracker::TimingEvent te1("", "", 123); + TimingEvent te1("", "", 123); bool arg_exception_on_no_category = false; try { - t->track_timing(te1); + t->track(te1); } catch (invalid_argument) { arg_exception_on_no_category = true; } REQUIRE(arg_exception_on_no_category == true); - Tracker::TimingEvent te2("category", "", 123); + TimingEvent te2("category", "", 123); bool arg_exception_on_no_variable = false; try { - t->track_timing(te2); + t->track(te2); } catch (invalid_argument) { arg_exception_on_no_variable = true; } @@ -465,15 +476,15 @@ TEST_CASE("tracker") { Tracker::close(); } - SECTION("track_self_describing_event generates a 
sane event") { + SECTION("track SelfDescribingEvent generates a sane event") { MockEmitter e; bool desktop_context = false; Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); SelfDescribingJson sdj("schema", "{ \"hello\":\"world\" }"_json); - Tracker::SelfDescribingEvent sde(sdj); - t->track_self_describing_event(sde); + SelfDescribingEvent sde(sdj); + t->track(sde); REQUIRE(e.get_added_payloads().size() == 1); @@ -483,7 +494,7 @@ TEST_CASE("tracker") { REQUIRE(payload[SNOWPLOW_PLATFORM] == "srv"); REQUIRE(payload[SNOWPLOW_APP_ID] == ""); REQUIRE(payload[SNOWPLOW_SP_NAMESPACE] == ""); - REQUIRE(payload[SNOWPLOW_EVENT] == SNOWPLOW_EVENT_UNSTRUCTURED); + REQUIRE(payload[SNOWPLOW_EVENT] == SNOWPLOW_EVENT_SELF_DESCRIBING); REQUIRE(payload[SNOWPLOW_TIMESTAMP].size() > 10); REQUIRE(payload[SNOWPLOW_EID].size() > 5); REQUIRE(payload.count(SNOWPLOW_TRUE_TIMESTAMP) == 0); From 80af26c769dc6f5ef15003dc9e0de8d189cc44d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matu=CC=81s=CC=8C=20Tomlein?= Date: Fri, 8 Apr 2022 12:41:27 +0200 Subject: [PATCH 03/35] Add support for Linux when generating UUIDs (close #46) --- src/utils.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/utils.cpp b/src/utils.cpp index f8b2987..186d3e8 100644 --- a/src/utils.cpp +++ b/src/utils.cpp @@ -59,6 +59,18 @@ string Utils::get_uuid4() { return uuid; } +#else + +#include + +string Utils::get_uuid4() { + uuid_t uuid; + char str[200]; + uuid_generate_random(uuid); + uuid_unparse(uuid, str); + return string(str); +} + #endif string Utils::int_list_to_string(list *int_list, const string &delimiter) { From 068be229f4ba9cc78b5602ff81e2ac4b1f44eb3a Mon Sep 17 00:00:00 2001 From: Matus Tomlein Date: Fri, 8 Apr 2022 14:15:34 +0200 Subject: [PATCH 04/35] Add support for Linux in desktop context (close #47) --- src/http/http_client_curl.cpp | 3 +- src/http/http_client_curl.hpp | 1 - src/utils.cpp | 51 +++++++++++++++++++------ test/utils_test.cpp | 71 
+++++++++++++++++++++-------------- 4 files changed, 82 insertions(+), 44 deletions(-) diff --git a/src/http/http_client_curl.cpp b/src/http/http_client_curl.cpp index 32bcb66..540624c 100644 --- a/src/http/http_client_curl.cpp +++ b/src/http/http_client_curl.cpp @@ -14,12 +14,11 @@ See the Apache License Version 2.0 for the specific language governing permissio #if !defined(WIN32) && !defined(_WIN32) && !defined(__WIN32) || defined(__CYGWIN__) #include "http_client_curl.hpp" #include "../constants.hpp" -#include "curl/curl.h" +#include using namespace snowplow; using std::cerr; using std::endl; -using std::lock_guard; HttpClientCurl::HttpClientCurl() { curl_global_init(CURL_GLOBAL_ALL); diff --git a/src/http/http_client_curl.hpp b/src/http/http_client_curl.hpp index 03de194..efeaa14 100644 --- a/src/http/http_client_curl.hpp +++ b/src/http/http_client_curl.hpp @@ -23,7 +23,6 @@ See the Apache License Version 2.0 for the specific language governing permissio using std::string; using std::list; -using std::mutex; namespace snowplow { /** diff --git a/src/utils.cpp b/src/utils.cpp index 186d3e8..470fc66 100644 --- a/src/utils.cpp +++ b/src/utils.cpp @@ -215,20 +215,34 @@ int Utils::get_device_processor_count() { return sysinfo.dwNumberOfProcessors; } -#elif defined(__APPLE__) +#else -string Utils::get_os_type() { - return "macOS"; +bool Utils::get_os_is_64bit() { +#if INTPTR_MAX == INT64_MAX + return true; +#else + return false; +#endif } -string Utils::get_os_version() { - return get_os_version_objc(); +int Utils::get_device_processor_count() { + return std::thread::hardware_concurrency(); } string Utils::get_os_service_pack() { return ""; } +#if defined(__APPLE__) + +string Utils::get_os_type() { + return "macOS"; +} + +string Utils::get_os_version() { + return get_os_version_objc(); +} + string Utils::get_device_manufacturer() { return "Apple Inc."; } @@ -240,16 +254,29 @@ string Utils::get_device_model() { return str; } -bool Utils::get_os_is_64bit() { -#if 
INTPTR_MAX == INT64_MAX - return true; #else - return false; -#endif + +#include + +string Utils::get_os_type() { + utsname info; + uname(&info); + return info.sysname; // e.g., Linux } -int Utils::get_device_processor_count() { - return std::thread::hardware_concurrency(); +string Utils::get_os_version() { + utsname info; + uname(&info); + return info.version; // e.g., #26~20.04.1-Ubuntu SMP Sat Jan 8 18:05:46 UTC 2022 +} + +string Utils::get_device_manufacturer() { + return ""; } +string Utils::get_device_model() { + return ""; +} + +#endif #endif diff --git a/test/utils_test.cpp b/test/utils_test.cpp index eea4a39..3ba185d 100644 --- a/test/utils_test.cpp +++ b/test/utils_test.cpp @@ -91,8 +91,15 @@ TEST_CASE("utils") { } SECTION("get_device_context should populate OS specific information correctly") { -#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) + string os_type = Utils::get_os_type(); + string os_version = Utils::get_os_version(); + string os_service_pack = Utils::get_os_service_pack(); + bool os_is_64bit = Utils::get_os_is_64bit(); + string device_manufacturer = Utils::get_device_manufacturer(); + string device_model = Utils::get_device_model(); + int device_processor_count = Utils::get_device_processor_count(); +#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) DWORD expected_proc_count = std::thread::hardware_concurrency(); OSVERSIONINFOEX osviex; @@ -100,47 +107,53 @@ TEST_CASE("utils") { osviex.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); REQUIRE(::GetVersionEx((LPOSVERSIONINFO)&osviex) != 0); - string os_version = to_string(osviex.dwMajorVersion) + "." + to_string(osviex.dwMinorVersion) + "." + to_string(osviex.dwBuildNumber); - string service_pack = to_string(osviex.wServicePackMajor) + "." + to_string(osviex.wServicePackMinor); + string expected_os_version = to_string(osviex.dwMajorVersion) + "." + to_string(osviex.dwMinorVersion) + "." 
+ to_string(osviex.dwBuildNumber); + string expected_service_pack = to_string(osviex.wServicePackMajor) + "." + to_string(osviex.wServicePackMinor); - bool is_64_bit_os; + bool expected_is_64_bit_os; #if defined(_WIN64) - is_64_bit_os = true; + expected_is_64_bit_os = true; #elif defined(_WIN32) BOOL f64 = FALSE; - is_64_bit_os = IsWow64Process(GetCurrentProcess(), &f64) && f64; + expected_is_64_bit_os = IsWow64Process(GetCurrentProcess(), &f64) && f64; #else - is_64_bit_os = false; + expected_is_64_bit_os = false; #endif - REQUIRE("Windows" == Utils::get_os_type()); - REQUIRE(os_version == Utils::get_os_version()); - REQUIRE(service_pack == Utils::get_os_service_pack()); - REQUIRE(is_64_bit_os == Utils::get_os_is_64bit()); - REQUIRE("" == Utils::get_device_manufacturer()); - REQUIRE("" == Utils::get_device_model()); - REQUIRE(expected_proc_count == Utils::get_device_processor_count()); + REQUIRE("Windows" == os_type); + REQUIRE(expected_os_version == os_version); + REQUIRE(expected_service_pack == os_service_pack); + REQUIRE(expected_is_64_bit_os == os_is_64bit); + REQUIRE("" == device_manufacturer); + REQUIRE("" == device_model); + REQUIRE(expected_proc_count == device_processor_count); #elif defined(__APPLE__) - REQUIRE("macOS" == Utils::get_os_type()); - REQUIRE("" != Utils::get_os_version()); - REQUIRE("" == Utils::get_os_service_pack()); - REQUIRE((true || false) == Utils::get_os_is_64bit()); - REQUIRE("Apple Inc." == Utils::get_device_manufacturer()); - REQUIRE("" != Utils::get_device_model()); - REQUIRE(0 != Utils::get_device_processor_count()); + REQUIRE("macOS" == os_type); + REQUIRE("" != os_version); + REQUIRE("" == os_service_pack); + REQUIRE("Apple Inc." 
== device_manufacturer); + REQUIRE("" != device_model); + REQUIRE(0 != device_processor_count); +#else + REQUIRE("Linux" == os_type); + REQUIRE("" != os_version); + REQUIRE("" == os_service_pack); + REQUIRE("" == device_manufacturer); + REQUIRE("" == device_model); + REQUIRE(0 != device_processor_count); +#endif SelfDescribingJson desktop_context = Utils::get_desktop_context(); json desktop_context_json = desktop_context.get(); json desktop_context_data = desktop_context_json[SNOWPLOW_DATA]; REQUIRE(SNOWPLOW_SCHEMA_DESKTOP_CONTEXT == desktop_context_json[SNOWPLOW_SCHEMA].get()); - REQUIRE("macOS" == desktop_context_data[SNOWPLOW_DESKTOP_OS_TYPE].get()); - REQUIRE("" != desktop_context_data[SNOWPLOW_DESKTOP_OS_VERSION].get()); - REQUIRE("" == desktop_context_data[SNOWPLOW_DESKTOP_OS_SERVICE_PACK].get()); - REQUIRE((true || false) == desktop_context_data[SNOWPLOW_DESKTOP_OS_IS_64_BIT].get()); - REQUIRE("Apple Inc." == desktop_context_data[SNOWPLOW_DESKTOP_DEVICE_MANU].get()); - REQUIRE("" != desktop_context_data[SNOWPLOW_DESKTOP_DEVICE_MODEL].get()); - REQUIRE(0 != desktop_context_data[SNOWPLOW_DESKTOP_DEVICE_PROC_COUNT].get()); -#endif + REQUIRE(os_type == desktop_context_data[SNOWPLOW_DESKTOP_OS_TYPE].get()); + REQUIRE(os_version == desktop_context_data[SNOWPLOW_DESKTOP_OS_VERSION].get()); + REQUIRE(os_service_pack == desktop_context_data[SNOWPLOW_DESKTOP_OS_SERVICE_PACK].get()); + REQUIRE(os_is_64bit == desktop_context_data[SNOWPLOW_DESKTOP_OS_IS_64_BIT].get()); + REQUIRE(device_manufacturer == desktop_context_data[SNOWPLOW_DESKTOP_DEVICE_MANU].get()); + REQUIRE(device_model == desktop_context_data[SNOWPLOW_DESKTOP_DEVICE_MODEL].get()); + REQUIRE(device_processor_count == desktop_context_data[SNOWPLOW_DESKTOP_DEVICE_PROC_COUNT].get()); } } From 031c3f7eba501faccc5a4bb24dd16f5c942b6cbf Mon Sep 17 00:00:00 2001 From: Matus Tomlein Date: Fri, 8 Apr 2022 14:20:36 +0200 Subject: [PATCH 05/35] Add CI build on Linux and update build instructions (close #50) --- 
.github/workflows/build.yml | 15 +++++++++++++++ Makefile | 11 +++++++++++ README.md | 13 ++++++++++--- test/storage_test.cpp | 1 + 4 files changed, 37 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cf14bff..5f9e86f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,3 +31,18 @@ jobs: - name: Run tests run: .\x64\Debug\snowplow-cpp-tracker.exe + + test-linux: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install packages + run: sudo apt-get install -y libcurl4-openssl-dev uuid-dev + + - name: Make + run: make + + - name: Run tests + run: make unit-tests diff --git a/Makefile b/Makefile index c91509a..220a4ed 100644 --- a/Makefile +++ b/Makefile @@ -29,10 +29,16 @@ cxx-test-objects := $(patsubst %.cpp, %.o, $(cxx-test-files)) cxx-example-objects := $(patsubst %.cpp, %.o, $(cxx-example-files)) cxx-performance-objects := $(patsubst %.cpp, %.o, $(cxx-performance-files)) +UNAME_S := $(shell uname -s) +ifeq ($(UNAME_S), Darwin) # Objective-C++ Files objcxx-src-files := $(shell find src -maxdepth 1 -name "*.mm") objcxx-objects := $(patsubst %.mm, %.o, $(objcxx-src-files)) +else +objcxx-src-files := +objcxx-objects := +endif # Arguments @@ -41,8 +47,13 @@ CXX := g++ OBJCXX := c++ CCFLAGS := -Werror -g CXXFLAGS := -std=c++11 -Werror -g -D SNOWPLOW_TEST_SUITE --coverage -O0 +ifeq ($(UNAME_S), Darwin) LDFLAGS := -framework CoreFoundation -framework CFNetwork -framework Foundation -framework CoreServices LDLIBS := -lcurl +else +LDLIBS := -lcurl -pthread -ldl -luuid +LDFLAGS := +endif # Building diff --git a/README.md b/README.md index 358562a..b495f4b 100644 --- a/README.md +++ b/README.md @@ -12,12 +12,19 @@ Snowplow C++ tracker enables you to add analytics to your C++ applications, serv ## Quick Start -The tracker currently supports macOS and Windows. +The tracker supports macOS, Windows, and Linux. 
### Installation Download the most recent release from the [releases section](https://github.com/snowplow/snowplow-cpp-tracker/releases). Everything in both the `src` and `include` folders will need to be included in your application. It is important to keep the same folder structure as references to the included headers have been done like so: `../include/json.hpp`. +#### Requirements under Linux + +The following libraries need to be installed: + +* curl (using `apt install libcurl4-openssl-dev` on Ubuntu) +* uuid (using `apt install uuid-dev` on Ubuntu) + ### Using the tracker Import and initialize the tracker with your Snowplow collector endpoint and tracker configuration: @@ -62,7 +69,7 @@ Check the tracked events in a [Snowplow Micro](https://docs.snowplowanalytics.co ## Developer Quick Start -### Building on macOS +### Building on macOS and Linux ```bash host> git clone https://github.com/snowplow/snowplow-cpp-tracker @@ -92,7 +99,7 @@ To run the test suite: host> make unit-tests ``` -If you wish to generate a local code coverage report you will first need to install [lcov](http://ltp.sourceforge.net/coverage/lcov.php) on your host machine. The easiest way to do this is using [brew](http://brew.sh/): +If you wish to generate a local code coverage report you will first need to install [lcov](http://ltp.sourceforge.net/coverage/lcov.php) on your host machine. 
The easiest way to do this is using [brew](http://brew.sh/) under macOS: ```bash host> brew install lcov diff --git a/test/storage_test.cpp b/test/storage_test.cpp index f4f40dd..1e49e4a 100644 --- a/test/storage_test.cpp +++ b/test/storage_test.cpp @@ -18,6 +18,7 @@ using namespace snowplow; using std::runtime_error; TEST_CASE("storage") { + Storage::close(); Storage *storage = Storage::init("test1.db"); REQUIRE("test1.db" == storage->get_db_name()); storage->delete_all_event_rows(); From 12100e9a0cf51fa1cf02bf947ab123335aa20b34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Thu, 14 Apr 2022 09:59:56 +0200 Subject: [PATCH 06/35] Add a header file that includes all the published APIs (close #55) PR #56 --- README.md | 4 +-- examples/main.cpp | 5 +--- performance/main.cpp | 3 +- performance/mock_client_session.hpp | 2 +- performance/mock_emitter.hpp | 2 +- performance/mute_emitter.hpp | 2 +- performance/run.cpp | 6 +--- src/snowplow.hpp | 43 +++++++++++++++++++++++++++++ 8 files changed, 51 insertions(+), 16 deletions(-) create mode 100644 src/snowplow.hpp diff --git a/README.md b/README.md index b495f4b..a8d6c94 100644 --- a/README.md +++ b/README.md @@ -27,10 +27,10 @@ The following libraries need to be installed: ### Using the tracker -Import and initialize the tracker with your Snowplow collector endpoint and tracker configuration: +Import using the `snowplow.hpp` header file and initialize the tracker with your Snowplow collector endpoint and tracker configuration: ```cpp -#include "tracker.hpp" +#include "snowplow.hpp" using namespace snowplow; diff --git a/examples/main.cpp b/examples/main.cpp index 593149a..bc36435 100644 --- a/examples/main.cpp +++ b/examples/main.cpp @@ -2,10 +2,7 @@ #include #include -#include "../src/tracker.hpp" -#include "../src/events/structured_event.hpp" -#include "../src/events/timing_event.hpp" -#include "../src/events/screen_view_event.hpp" +#include "../src/snowplow.hpp" using snowplow::ClientSession; 
using snowplow::Emitter; diff --git a/performance/main.cpp b/performance/main.cpp index 1a7ce90..c222969 100644 --- a/performance/main.cpp +++ b/performance/main.cpp @@ -14,8 +14,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include -#include "../src/storage.hpp" -#include "../src/utils.hpp" +#include "../src/snowplow.hpp" #include "run.hpp" using snowplow::SelfDescribingJson; diff --git a/performance/mock_client_session.hpp b/performance/mock_client_session.hpp index 100a400..f14a5c8 100644 --- a/performance/mock_client_session.hpp +++ b/performance/mock_client_session.hpp @@ -14,7 +14,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #ifndef MOCK_CLIENT_SESSION_H #define MOCK_CLIENT_SESSION_H -#include "../src/client_session.hpp" +#include "../src/snowplow.hpp" using snowplow::ClientSession; using snowplow::SelfDescribingJson; diff --git a/performance/mock_emitter.hpp b/performance/mock_emitter.hpp index cf09db6..d227911 100644 --- a/performance/mock_emitter.hpp +++ b/performance/mock_emitter.hpp @@ -14,7 +14,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #ifndef MOCK_EMITTER_H #define MOCK_EMITTER_H -#include "../src/emitter.hpp" +#include "../src/snowplow.hpp" using snowplow::Emitter; using snowplow::Payload; diff --git a/performance/mute_emitter.hpp b/performance/mute_emitter.hpp index 13a9953..ee62f96 100644 --- a/performance/mute_emitter.hpp +++ b/performance/mute_emitter.hpp @@ -14,7 +14,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #ifndef MUTE_EMITTER_H #define MUTE_EMITTER_H -#include "../src/emitter.hpp" +#include "../src/snowplow.hpp" using snowplow::Emitter; using std::string; diff --git a/performance/run.cpp b/performance/run.cpp index 88d56f5..7fc106c 100644 --- a/performance/run.cpp +++ b/performance/run.cpp @@ -14,11 +14,7 @@ See the Apache License Version 2.0 for the specific language 
governing permissio #include #include -#include "../src/subject.hpp" -#include "../src/tracker.hpp" -#include "../src/events/structured_event.hpp" -#include "../src/events/timing_event.hpp" -#include "../src/events/screen_view_event.hpp" +#include "../src/snowplow.hpp" #include "mock_client_session.hpp" #include "mock_emitter.hpp" #include "mute_emitter.hpp" diff --git a/src/snowplow.hpp b/src/snowplow.hpp new file mode 100644 index 0000000..cb2a970 --- /dev/null +++ b/src/snowplow.hpp @@ -0,0 +1,43 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef SNOWPLOW_H +#define SNOWPLOW_H + +/** + * @brief This file provides the single header to import when using the Snowplow tracker in your code. 
+ */ + +#include "client_session.hpp" +#include "emitter.hpp" +#include "storage.hpp" +#include "subject.hpp" +#include "tracker.hpp" + +// http +#include "http/http_client.hpp" +#include "http/http_client_apple.hpp" +#include "http/http_client_curl.hpp" +#include "http/http_client_windows.hpp" + +// payload +#include "payload/self_describing_json.hpp" + +// events +#include "events/event.hpp" +#include "events/screen_view_event.hpp" +#include "events/self_describing_event.hpp" +#include "events/structured_event.hpp" +#include "events/timing_event.hpp" + +#endif From 9647c54f1bea38a94b6764e7341c9fa09421606e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Tue, 19 Apr 2022 11:55:54 +0200 Subject: [PATCH 07/35] Add emitter callback function (close #7) PR #59 --- examples/main.cpp | 16 +++ performance/logs.txt | 1 + src/emitter.cpp | 182 +++++++++++++++++-------- src/emitter.hpp | 47 +++++-- src/http/http_request_result.cpp | 59 ++++---- src/http/http_request_result.hpp | 21 +-- test/emitter_test.cpp | 127 +++++++++++++---- test/http/http_request_result_test.cpp | 24 +++- test/http/test_http_client.cpp | 17 ++- test/http/test_http_client.hpp | 5 + 10 files changed, 370 insertions(+), 129 deletions(-) diff --git a/examples/main.cpp b/examples/main.cpp index bc36435..a4fdced 100644 --- a/examples/main.cpp +++ b/examples/main.cpp @@ -6,6 +6,7 @@ using snowplow::ClientSession; using snowplow::Emitter; +using snowplow::EmitStatus; using snowplow::Subject; using snowplow::Tracker; using snowplow::StructuredEvent; @@ -30,6 +31,21 @@ int main(int argc, char **argv) { string db_name = "demo.db"; Emitter emitter(uri, Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name); + emitter.set_request_callback( + [](list event_ids, EmitStatus emit_status) { + switch (emit_status) { + case EmitStatus::SUCCESS: + printf("Successfuly sent %lu events.\n", event_ids.size()); + break; + case EmitStatus::FAILED_WILL_RETRY: + printf("Failed to 
send %lu events, but will retry.\n", event_ids.size()); + break; + case EmitStatus::FAILED_WONT_RETRY: + printf("Failed to send %lu events and won't retry.\n", event_ids.size()); + break; + } + }, + EmitStatus::SUCCESS | EmitStatus::FAILED_WILL_RETRY | EmitStatus::FAILED_WONT_RETRY); Subject subject; subject.set_user_id("a-user-id"); diff --git a/performance/logs.txt b/performance/logs.txt index 1ed166c..c9da94a 100644 --- a/performance/logs.txt +++ b/performance/logs.txt @@ -6,3 +6,4 @@ {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":5.742478167,"mocked_emitter_and_real_session":5.050215375,"mute_emitter_and_mocked_session":25.8688495,"mute_emitter_and_real_session":19.645383417,"num_operations":10000,"num_threads":5},"timestamp":1645041474207,"tracker_version":"cpp-0.1.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.493122875,"mocked_emitter_and_real_session":6.679922959,"mute_emitter_and_mocked_session":18.915125958,"mute_emitter_and_real_session":17.245701792,"num_operations":10000,"num_threads":5},"timestamp":1648794323772,"tracker_version":"cpp-0.2.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple 
Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.519524667,"mocked_emitter_and_real_session":6.452258541,"mute_emitter_and_mocked_session":21.564418917,"mute_emitter_and_real_session":18.91465825,"num_operations":10000,"num_threads":5},"timestamp":1649249418985,"tracker_version":"cpp-0.2.0"} +{"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.459520667,"mocked_emitter_and_real_session":6.436076,"mute_emitter_and_mocked_session":21.589996458,"mute_emitter_and_real_session":19.414266,"num_operations":10000,"num_threads":5},"timestamp":1649861294219,"tracker_version":"cpp-0.2.0"} diff --git a/src/emitter.cpp b/src/emitter.cpp index 2600093..f6513fc 100644 --- a/src/emitter.cpp +++ b/src/emitter.cpp @@ -19,6 +19,12 @@ using std::lock_guard; using std::stringstream; using std::unique_lock; using std::unique_ptr; +using std::async; +using std::to_string; +using std::transform; +using std::equal; +using std::move; +using std::future; const int post_wrapper_bytes = 88; // "schema":"iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-4","data":[] const int post_stm_bytes = 22; // "stm":"1443452851000" @@ -56,10 +62,10 @@ Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_l string expected_http = "http://"; string expected_https = "https://"; string actual_uri_lower = uri; - std::transform(actual_uri_lower.begin(), actual_uri_lower.end(), actual_uri_lower.begin(), ::tolower); + transform(actual_uri_lower.begin(), actual_uri_lower.end(), 
actual_uri_lower.begin(), ::tolower); - if ((expected_http.size() <= actual_uri_lower.size() && std::equal(expected_http.begin(), expected_http.end(), actual_uri_lower.begin())) || - (expected_https.size() <= actual_uri_lower.size() && std::equal(expected_https.begin(), expected_https.end(), actual_uri_lower.begin()))) { + if ((expected_http.size() <= actual_uri_lower.size() && equal(expected_http.begin(), expected_http.end(), actual_uri_lower.begin())) || + (expected_https.size() <= actual_uri_lower.size() && equal(expected_https.begin(), expected_https.end(), actual_uri_lower.begin()))) { throw invalid_argument("FATAL: Emitter URI (" + uri + ") must not start with http:// or https://"); } @@ -72,7 +78,7 @@ Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_l this->m_send_limit = send_limit; this->m_byte_limit_post = byte_limit_post; this->m_byte_limit_get = byte_limit_get; - this->m_http_client = std::move(http_client); + this->m_http_client = move(http_client); } Emitter::~Emitter() { @@ -84,12 +90,10 @@ Emitter::~Emitter() { void Emitter::start() { unique_lock locker(this->m_run_check); if (this->m_running) { - locker.unlock(); // refuse to start more than once - return; + return; // refuse to start more than once } this->m_running = true; this->m_daemon_thread = thread(&Emitter::run, this); - locker.unlock(); } void Emitter::stop() { @@ -100,8 +104,6 @@ void Emitter::stop() { this->m_check_db.notify_all(); this->m_daemon_thread.join(); - } else { - locker.unlock(); } } @@ -113,7 +115,6 @@ void Emitter::add(Payload payload) { void Emitter::flush() { unique_lock locker_1(this->m_run_check); if (this->m_running == false) { - locker_1.unlock(); return; } locker_1.unlock(); @@ -130,108 +131,161 @@ void Emitter::flush() { // --- Private void Emitter::run() { - list *event_rows = new list; - list *results = new list; - list *success_ids = new list; - do { - Storage::instance()->select_event_row_range(event_rows, this->m_send_limit); - - if 
(event_rows->size() > 0) { - this->do_send(event_rows, results); - - for (list::iterator it = results->begin(); it != results->end(); ++it) { - list res_row_ids = it->get_row_ids(); - if (it->is_success()) { - success_ids->splice(success_ids->end(), res_row_ids); + list event_rows; + Storage::instance()->select_event_row_range(&event_rows, m_send_limit); + + if (event_rows.size() > 0) { + // emit the events + list results; + do_send(event_rows, &results); + + // classify results into successful and failed + list success_row_ids; + list failed_will_retry_row_ids; + list failed_wont_retry_row_ids; + for (auto const &result : results) { + auto res_row_ids = result.get_row_ids(); + if (result.is_success()) { + success_row_ids.splice(success_row_ids.end(), res_row_ids); + } else if (result.should_retry()) { + failed_will_retry_row_ids.splice(failed_will_retry_row_ids.end(), res_row_ids); + } else { + failed_wont_retry_row_ids.splice(failed_wont_retry_row_ids.end(), res_row_ids); } } - Storage::instance()->delete_event_row_ids(success_ids); - // Reset collections - event_rows->clear(); - results->clear(); - success_ids->clear(); + // trigger callbacks if enabled + trigger_callbacks(success_row_ids, failed_will_retry_row_ids, failed_wont_retry_row_ids, event_rows); + + // delete rows with successfully sent events and failed events that should not be retried + list delete_row_ids; + delete_row_ids.splice(delete_row_ids.end(), success_row_ids); + delete_row_ids.splice(delete_row_ids.end(), failed_wont_retry_row_ids); + Storage::instance()->delete_event_row_ids(&delete_row_ids); } else { - this->m_check_fin.notify_all(); + m_check_fin.notify_all(); - unique_lock locker(this->m_db_select); - this->m_check_db.wait_for(locker, std::chrono::seconds(5)); - locker.unlock(); + // if there are no events to send, sleep for a while + unique_lock locker(m_db_select); + m_check_db.wait_for(locker, std::chrono::seconds(5)); } - } while (this->is_running()); - - delete (event_rows); - 
delete (results); - delete (success_ids); + } while (is_running()); } -void Emitter::do_send(list *event_rows, list *results) { - list> request_futures; +void Emitter::do_send(const list &event_rows, list *results) { + list> request_futures; // Send each request in its own thread if (this->m_method == GET) { - for (list::iterator it = event_rows->begin(); it != event_rows->end(); ++it) { - Payload event_payload = it->event; - event_payload.add(SNOWPLOW_SENT_TIMESTAMP, std::to_string(Utils::get_unix_epoch_ms())); + for (auto const &row : event_rows) { + Payload event_payload = row.event; + event_payload.add(SNOWPLOW_SENT_TIMESTAMP, to_string(Utils::get_unix_epoch_ms())); string query_string = Utils::map_to_query_string(event_payload.get()); - list row_id = {it->id}; + list row_id = {row.id}; - request_futures.push_back(std::async(&HttpClient::http_get, this->m_http_client.get(), this->m_url, query_string, row_id, (query_string.size() > this->m_byte_limit_get))); - request_futures.push_back(std::async(&HttpClient::http_get, this->m_http_client.get(), this->m_url, query_string, row_id, (query_string.size() > this->m_byte_limit_get))); + request_futures.push_back(async(&HttpClient::http_get, this->m_http_client.get(), this->m_url, query_string, row_id, (query_string.size() > this->m_byte_limit_get))); } } else { list row_ids; list payloads; int total_byte_size = 0; - for (list::iterator it = event_rows->begin(); it != event_rows->end(); ++it) { - unsigned int byte_size = Utils::serialize_payload(it->event).size() + post_stm_bytes; + for (auto const &row : event_rows) { + unsigned int byte_size = Utils::serialize_payload(row.event).size() + post_stm_bytes; if ((byte_size + post_wrapper_bytes) > this->m_byte_limit_post) { // A single payload has exceeded the Byte Limit - list single_row_id = {it->id}; - list single_payload = {it->event}; - request_futures.push_back(std::async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, 
this->build_post_data_json(single_payload), single_row_id, true)); + list single_row_id = {row.id}; + list single_payload = {row.event}; + request_futures.push_back(async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(single_payload), single_row_id, true)); single_row_id.clear(); single_payload.clear(); } else if ((total_byte_size + byte_size + post_wrapper_bytes + (payloads.size() - 1)) > this->m_byte_limit_post) { // Byte limit reached - request_futures.push_back(std::async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(payloads), row_ids, false)); + request_futures.push_back(async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(payloads), row_ids, false)); // Reset accumulators row_ids.clear(); - row_ids = {it->id}; + row_ids = {row.id}; payloads.clear(); - payloads = {it->event}; + payloads = {row.event}; total_byte_size = byte_size; } else { - row_ids.push_back(it->id); - payloads.push_back(it->event); + row_ids.push_back(row.id); + payloads.push_back(row.event); total_byte_size += byte_size; } } if (payloads.size() > 0) { - request_futures.push_back(std::async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(payloads), row_ids, false)); + request_futures.push_back(async(&HttpClient::http_post, this->m_http_client.get(), this->m_url, this->build_post_data_json(payloads), row_ids, false)); } } // Grab all the request results and return - for (list>::iterator it = request_futures.begin(); it != request_futures.end(); ++it) { + for (auto it = request_futures.begin(); it != request_futures.end(); ++it) { results->push_back(it->get()); } request_futures.clear(); } +void Emitter::trigger_callbacks(const list &success_row_ids, const list &failed_will_retry_row_ids, const list &failed_wont_retry_row_ids, const list &event_rows) const { + if (!m_callback) { + return; + } + + // create a mapping 
table and function between row IDs and event IDs + map event_ids_for_row_ids; + for (auto const &row : event_rows) { + auto payload = row.event.get(); + auto it = payload.find(SNOWPLOW_EID); + if (it != payload.end()) { + event_ids_for_row_ids.insert({row.id, it->second}); + } + } + auto transform_row_ids_to_event_ids = [&](const list &row_ids) { + list event_ids; + for (auto const &row_id : row_ids) { + auto it = event_ids_for_row_ids.find(row_id); + if (it != event_ids_for_row_ids.end()) { + event_ids.push_back(it->second); + } + } + return event_ids; + }; + + // execute callback for successful events + if ((m_callback_emit_status & SUCCESS) && !success_row_ids.empty()) { + list success_event_ids = transform_row_ids_to_event_ids(success_row_ids); + execute_callback(success_event_ids, SUCCESS); + } + + // execute callback for failed events that will be retried + if ((m_callback_emit_status & FAILED_WILL_RETRY) && !failed_will_retry_row_ids.empty()) { + list failed_will_retry_event_ids = transform_row_ids_to_event_ids(failed_will_retry_row_ids); + execute_callback(failed_will_retry_event_ids, FAILED_WILL_RETRY); + } + + // execute callback for failed events that won't be retried + if ((m_callback_emit_status & FAILED_WONT_RETRY) && !failed_wont_retry_row_ids.empty()) { + list failed_wont_retry_event_ids = transform_row_ids_to_event_ids(failed_wont_retry_row_ids); + execute_callback(failed_wont_retry_event_ids, FAILED_WONT_RETRY); + } +} + +void Emitter::execute_callback(const list &event_ids, EmitStatus emit_status) const { + thread(m_callback, event_ids, emit_status).detach(); +} + // --- Helpers string Emitter::build_post_data_json(list payload_list) { json data_array = json::array(); // Add 'stm' to each payload - string stm = std::to_string(Utils::get_unix_epoch_ms()); + string stm = to_string(Utils::get_unix_epoch_ms()); for (list::iterator it = payload_list.begin(); it != payload_list.end(); ++it) { it->add(SNOWPLOW_SENT_TIMESTAMP, stm); 
data_array.push_back(it->get()); @@ -242,7 +296,7 @@ string Emitter::build_post_data_json(list payload_list) { return post_envelope.to_string(); } -string Emitter::get_collector_url(const string &uri, Protocol protocol, Method method) { +string Emitter::get_collector_url(const string &uri, Protocol protocol, Method method) const { stringstream url; url << (protocol == HTTP ? "http" : "https") << "://" << uri; url << "/" << (method == GET ? SNOWPLOW_GET_PROTOCOL_PATH : SNOWPLOW_POST_PROTOCOL_VENDOR + "/" + SNOWPLOW_POST_PROTOCOL_VERSION); @@ -253,3 +307,13 @@ bool Emitter::is_running() { lock_guard guard(this->m_run_check); return this->m_running; } + +void Emitter::set_request_callback(const EmitterCallback &callback, EmitStatus emit_status) { + lock_guard guard(this->m_run_check); + if (m_running) { + throw std::logic_error("Not allowed when Emitter is running"); + } + + m_callback_emit_status = emit_status; + m_callback = callback; +} diff --git a/src/emitter.hpp b/src/emitter.hpp index 8f67cbc..547e701 100644 --- a/src/emitter.hpp +++ b/src/emitter.hpp @@ -34,8 +34,22 @@ using std::thread; using std::condition_variable; using std::mutex; using std::unique_ptr; +using std::list; namespace snowplow { + +enum EmitStatus { + SUCCESS = 1, + FAILED_WILL_RETRY = 2, + FAILED_WONT_RETRY = 4 +}; + +inline EmitStatus operator|(EmitStatus a, EmitStatus b) { + return static_cast(static_cast(a) | static_cast(b)); +} + +typedef std::function, EmitStatus)> EmitterCallback; + /** * @brief Emitter is responsible for sending events to a Snowplow Collector. * @@ -133,35 +147,35 @@ class Emitter { * * @return CrackedUrl */ - CrackedUrl get_cracked_url() { return m_url; } + CrackedUrl get_cracked_url() const { return m_url; } /** * @brief Get the HTTP method. * * @return Method HTTP method used for sending events to Collector */ - Method get_method() { return m_method; } + Method get_method() const { return m_method; } /** * @brief Get the send limit. 
* * @return unsigned int The maximum amount of events to send at a time */ - unsigned int get_send_limit() { return m_send_limit; } + unsigned int get_send_limit() const { return m_send_limit; } /** * @brief Get the byte limit for GET. * * @return unsigned int The byte limit when sending a GET request */ - unsigned int get_byte_limit_get() { return m_byte_limit_get; } + unsigned int get_byte_limit_get() const { return m_byte_limit_get; } /** * @brief Get the byte limit for POST. * * @return unsigned int The byte limit when sending a POST request */ - unsigned int get_byte_limit_post() { return m_byte_limit_post; } + unsigned int get_byte_limit_post() const { return m_byte_limit_post; } /** * @brief Check if the Emitter is started. @@ -170,6 +184,19 @@ class Emitter { */ bool is_running(); + /** + * @brief Set a callback to call after emit requests are made with the resulting emit status. + * + * To subscribe to multiple emit statuses, use binary operations such as `EmitStatus::FAILED_WILL_RETRY | EmitStatus::FAILED_WONT_RETRY`. + * Calling this function overwrites any previously set callbacks. + * The callback can't be changed when the Emitter is running. + * The callback will be fired in a new thread. 
+ * + * @param callback Callback function + * @param emit_status Emit status to trigger the callback for + */ + void set_request_callback(const EmitterCallback &callback, EmitStatus emit_status); + private: CrackedUrl m_url; Method m_method; @@ -185,12 +212,16 @@ class Emitter { mutex m_db_select; mutex m_run_check; bool m_running; + EmitterCallback m_callback; + EmitStatus m_callback_emit_status; void run(); - void do_send(list* event_rows, list* results); + void do_send(const list &event_rows, list *results); string build_post_data_json(list payload_list); - string get_collector_url(const string & uri, Protocol protocol, Method method); + string get_collector_url(const string &uri, Protocol protocol, Method method) const; + void trigger_callbacks(const list &success_row_ids, const list &failed_will_retry_row_ids, const list &failed_wont_retry_row_ids, const list &event_rows) const; + void execute_callback(const list &event_ids, EmitStatus emit_status) const; }; -} +} // namespace snowplow #endif diff --git a/src/http/http_request_result.cpp b/src/http/http_request_result.cpp index a00dd95..5ae55ad 100644 --- a/src/http/http_request_result.cpp +++ b/src/http/http_request_result.cpp @@ -12,42 +12,53 @@ See the Apache License Version 2.0 for the specific language governing permissio */ #include "http_request_result.hpp" +#include "../constants.hpp" using namespace snowplow; HttpRequestResult::HttpRequestResult() { - this->m_http_response_code = 0; - this->m_internal_error_code = 0; - this->m_row_ids = {}; - this->m_is_successful = false; + m_is_oversize = false; + m_internal_error_code = 0; // not an error + m_http_response_code = 0; // not success, should retry + m_row_ids = {}; } HttpRequestResult::HttpRequestResult(int internal_error_code, int http_response_code, list row_ids, bool oversize) { - if (oversize) { - this->m_internal_error_code = 0; - this->m_http_response_code = 200; - this->m_is_successful = true; - } else if (internal_error_code != 0) { - 
this->m_internal_error_code = internal_error_code; - this->m_http_response_code = -1; - this->m_is_successful = false; - } else { - this->m_internal_error_code = 0; - this->m_http_response_code = http_response_code; - this->m_is_successful = (this->m_http_response_code == 200); - } + m_is_oversize = oversize; + m_internal_error_code = internal_error_code; + m_http_response_code = internal_error_code != 0 ? -1 : http_response_code; + m_row_ids = row_ids; +} - this->m_row_ids = row_ids; +int HttpRequestResult::get_http_response_code() const { + return m_http_response_code; } -int HttpRequestResult::get_http_response_code() { - return this->m_http_response_code; +list HttpRequestResult::get_row_ids() const { + return m_row_ids; } -list HttpRequestResult::get_row_ids() { - return this->m_row_ids; +bool HttpRequestResult::is_internal_error() const { + return m_internal_error_code != 0; +} + +bool HttpRequestResult::is_success() const { + if (is_internal_error()) { + return false; + } + return (get_http_response_code() >= 200 && get_http_response_code() < 300); } -bool HttpRequestResult::is_success() { - return this->m_is_successful; +bool HttpRequestResult::should_retry() const { + // don't retry if successful + if (is_success()) { + return false; + } + + // don't retry if request is larger than max byte limit + if (m_is_oversize) { + return false; + } + + return true; } diff --git a/src/http/http_request_result.hpp b/src/http/http_request_result.hpp index ee1a4a7..1b67545 100644 --- a/src/http/http_request_result.hpp +++ b/src/http/http_request_result.hpp @@ -16,26 +16,31 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include +#include using std::list; +using std::map; namespace snowplow { /** * @brief Response from HTTP requests to collector. To be used internally within tracker only. 
*/ class HttpRequestResult { +public: + HttpRequestResult(); + HttpRequestResult(int internal_error_code, int http_response_code, list row_ids, bool oversize); + int get_http_response_code() const; + list get_row_ids() const; + bool is_success() const; + bool should_retry() const; + private: + bool is_internal_error() const; + int m_http_response_code; int m_internal_error_code; list m_row_ids; - bool m_is_successful; - -public: - HttpRequestResult(); - HttpRequestResult(int internal_error_code, int http_response_code, list row_ids, bool oversize); - int get_http_response_code(); - list get_row_ids(); - bool is_success(); + bool m_is_oversize; }; } // namespace snowplow diff --git a/test/emitter_test.cpp b/test/emitter_test.cpp index 414b290..d79ab36 100644 --- a/test/emitter_test.cpp +++ b/test/emitter_test.cpp @@ -12,12 +12,27 @@ See the Apache License Version 2.0 for the specific language governing permissio */ #include "../src/emitter.hpp" -#include "http/test_http_client.hpp" +#include "../src/payload/event_payload.hpp" #include "catch.hpp" +#include "http/test_http_client.hpp" using namespace snowplow; using std::invalid_argument; +using std::make_tuple; +using std::tuple; using std::unique_ptr; +using std::vector; +using std::this_thread::sleep_for; +using std::chrono::milliseconds; + +string track_sample_event(Emitter &emitter) { + emitter.start(); + EventPayload payload; + payload.add("e", "pv"); + emitter.add(payload); + emitter.flush(); + return payload.get_event_id(); +} TEST_CASE("emitter") { SECTION("Emitter rejects urls (starting with http:// or https://)") { @@ -109,16 +124,16 @@ TEST_CASE("emitter") { } SECTION("Emitter should track and remove only successful events from the database for GET requests") { - Emitter e("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 52000, "test-emitter.db", unique_ptr(new TestHttpClient())); - e.start(); + Emitter emitter("com.acme.collector", Emitter::Method::GET, 
Emitter::Protocol::HTTPS, 500, 52000, 52000, "test-emitter.db", unique_ptr(new TestHttpClient())); + emitter.start(); - Payload p; - p.add("e", "pv"); + Payload payload; + payload.add("e", "pv"); for (int i = 0; i < 10; i++) { - e.add(p); + emitter.add(payload); } - e.flush(); + emitter.flush(); list requests = TestHttpClient::get_requests_list(); REQUIRE(0 != requests.size()); @@ -129,10 +144,10 @@ TEST_CASE("emitter") { event_list->clear(); TestHttpClient::set_http_response_code(404); - e.start(); + emitter.start(); for (int i = 0; i < 10; i++) { - e.add(p); + emitter.add(payload); } event_list = new list; @@ -140,22 +155,22 @@ TEST_CASE("emitter") { REQUIRE(10 == event_list->size()); event_list->clear(); - e.stop(); + emitter.stop(); TestHttpClient::reset(); delete (event_list); } SECTION("Emitter should track and remove only successful events from the database for POST requests") { - Emitter e("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); - e.start(); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + emitter.start(); - Payload p; - p.add("e", "pv"); + Payload payload; + payload.add("e", "pv"); for (int i = 0; i < 10; i++) { - e.add(p); + emitter.add(payload); } - e.flush(); + emitter.flush(); list requests = TestHttpClient::get_requests_list(); REQUIRE(0 != requests.size()); @@ -167,10 +182,10 @@ TEST_CASE("emitter") { // Test POST 404 response TestHttpClient::set_http_response_code(404); - e.start(); + emitter.start(); for (int i = 0; i < 10; i++) { - e.add(p); + emitter.add(payload); } event_list = new list; @@ -178,15 +193,15 @@ TEST_CASE("emitter") { REQUIRE(10 == event_list->size()); event_list->clear(); - e.stop(); + emitter.stop(); TestHttpClient::reset(); // Test POST combination logic for (int i = 0; i < 1000; i++) { - e.add(p); + emitter.add(payload); } 
- e.start(); - e.flush(); + emitter.start(); + emitter.flush(); event_list = new list; Storage::instance()->select_all_event_rows(event_list); @@ -194,12 +209,12 @@ TEST_CASE("emitter") { event_list->clear(); // Test POST single event too large logic - p.add("tv", "pvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpv"); - e.add(p); + payload.add("tv", "pvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpvpv"); + emitter.add(payload); TestHttpClient::set_http_response_code(404); - e.start(); - e.flush(); + emitter.start(); + emitter.flush(); event_list = new list; Storage::instance()->select_all_event_rows(event_list); @@ -209,4 +224,64 @@ TEST_CASE("emitter") { TestHttpClient::reset(); delete (event_list); } + + SECTION("triggers callback for all emit statuses") { + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 
500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + vector, EmitStatus>> calls; + emitter.set_request_callback( + [&](list event_ids, EmitStatus status) { + calls.push_back(make_tuple(event_ids, status)); + }, + EmitStatus::SUCCESS | EmitStatus::FAILED_WILL_RETRY | EmitStatus::FAILED_WONT_RETRY); + + // calls for successful event + TestHttpClient::set_http_response_code(200); + string event_id = track_sample_event(emitter); + sleep_for(milliseconds(500)); + REQUIRE(1 == calls.size()); + REQUIRE(1 == std::get<0>(calls[0]).size()); + REQUIRE(event_id == std::get<0>(calls[0]).front()); + REQUIRE(EmitStatus::SUCCESS == std::get<1>(calls[0])); + calls.clear(); + TestHttpClient::reset(); + + // calls for failed with retry event + TestHttpClient::set_temporary_response_code(500); + event_id = track_sample_event(emitter); + sleep_for(milliseconds(500)); + REQUIRE(2 == calls.size()); + REQUIRE(event_id == std::get<0>(calls[0]).front()); + REQUIRE(event_id == std::get<0>(calls[1]).front()); + REQUIRE(EmitStatus::FAILED_WILL_RETRY == std::get<1>(calls[0])); + REQUIRE(EmitStatus::SUCCESS == std::get<1>(calls[1])); + calls.clear(); + TestHttpClient::reset(); + } + + SECTION("doesn't trigger callbacks for not subscribed emit statuses") { + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + vector, EmitStatus>> calls; + emitter.set_request_callback( + [&](list event_ids, EmitStatus status) { + calls.push_back(make_tuple(event_ids, status)); + }, + EmitStatus::FAILED_WILL_RETRY); + + // doesn't call for successful event + TestHttpClient::set_http_response_code(200); + track_sample_event(emitter); + sleep_for(milliseconds(500)); + REQUIRE(0 == calls.size()); + TestHttpClient::reset(); + + // doesn't call for failed with retry event + TestHttpClient::set_temporary_response_code(500); + string event_id = track_sample_event(emitter); + 
sleep_for(milliseconds(500)); + REQUIRE(1 == calls.size()); + REQUIRE(1 == calls.size()); + REQUIRE(event_id == std::get<0>(calls[0]).front()); + REQUIRE(EmitStatus::FAILED_WILL_RETRY == std::get<1>(calls[0])); + TestHttpClient::reset(); + } } diff --git a/test/http/http_request_result_test.cpp b/test/http/http_request_result_test.cpp index 257fe63..fe77757 100644 --- a/test/http/http_request_result_test.cpp +++ b/test/http/http_request_result_test.cpp @@ -33,10 +33,28 @@ TEST_CASE("http_request_result") { REQUIRE(httpRequestResult.get_http_response_code() == 999); } - SECTION("the http_response_code and error_code should be 200 and 0 when oversized") { - HttpRequestResult httpRequestResult(123, 404, list(), true); - REQUIRE(httpRequestResult.get_http_response_code() == 200); + SECTION("should not retry if success") { + HttpRequestResult httpRequestResult(0, 200, list(), true); REQUIRE(httpRequestResult.is_success() == true); + REQUIRE(httpRequestResult.should_retry() == false); + } + + SECTION("should not retry if oversized") { + HttpRequestResult httpRequestResult(0, 500, list(), true); + REQUIRE(httpRequestResult.is_success() == false); + REQUIRE(httpRequestResult.should_retry() == false); + } + + SECTION("should retry for internal errors") { + HttpRequestResult httpRequestResult(1, 200, list(), false); + REQUIRE(httpRequestResult.is_success() == false); + REQUIRE(httpRequestResult.should_retry() == true); + } + + SECTION("should retry for 5xx status codes") { + HttpRequestResult httpRequestResult(0, 501, list(), false); + REQUIRE(httpRequestResult.is_success() == false); + REQUIRE(httpRequestResult.should_retry() == true); } SECTION("the default constructor should return nothing for getter functions") { diff --git a/test/http/test_http_client.cpp b/test/http/test_http_client.cpp index c9f456f..deb881e 100644 --- a/test/http/test_http_client.cpp +++ b/test/http/test_http_client.cpp @@ -23,6 +23,7 @@ const string TestHttpClient::TRACKER_AGENT = string("Snowplow 
C++ Tracker (Integ list TestHttpClient::requests_list; mutex TestHttpClient::log_read_write; int TestHttpClient::response_code = 200; +int TestHttpClient::temporary_response_code = -1; HttpRequestResult TestHttpClient::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { lock_guard guard(log_read_write); @@ -35,7 +36,7 @@ HttpRequestResult TestHttpClient::http_request(const RequestMethod method, Crack r.oversize = oversize; requests_list.push_back(r); - return HttpRequestResult(0, response_code, row_ids, oversize); + return HttpRequestResult(0, fetch_response_code(), row_ids, oversize); } void TestHttpClient::set_http_response_code(int http_response_code) { @@ -43,6 +44,20 @@ void TestHttpClient::set_http_response_code(int http_response_code) { response_code = http_response_code; } +void TestHttpClient::set_temporary_response_code(int http_response_code) { + lock_guard guard(log_read_write); + temporary_response_code = http_response_code; +} + +int TestHttpClient::fetch_response_code() { + if (temporary_response_code >= 0) { + int code = temporary_response_code; + temporary_response_code = -1; + return code; + } + return response_code; +} + list TestHttpClient::get_requests_list() { lock_guard guard(log_read_write); return requests_list; diff --git a/test/http/test_http_client.hpp b/test/http/test_http_client.hpp index 26f569b..3ab54dd 100644 --- a/test/http/test_http_client.hpp +++ b/test/http/test_http_client.hpp @@ -43,14 +43,19 @@ class TestHttpClient : public HttpClient { static list requests_list; static int response_code; + static int temporary_response_code; static mutex log_read_write; static void set_http_response_code(int http_response_code); + static void set_temporary_response_code(int http_response_code); static list get_requests_list(); static void reset(); protected: HttpRequestResult http_request(const RequestMethod method, const CrackedUrl url, const string & 
query_string, const string & post_data, list row_ids, bool oversize); + +private: + static int fetch_response_code(); }; } From e3a6927ddc793f4eb734da30dbc415c09eacf068 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Tue, 19 Apr 2022 12:04:35 +0200 Subject: [PATCH 08/35] Add customizable no-retry HTTP status codes (close #54) PR #60 --- src/constants.hpp | 3 ++ src/emitter.cpp | 14 ++++++- src/emitter.hpp | 12 ++++++ src/http/http_request_result.cpp | 16 +++++++- src/http/http_request_result.hpp | 2 +- test/emitter_test.cpp | 55 ++++++++++++++++++++++++-- test/http/http_request_result_test.cpp | 27 +++++++++++-- 7 files changed, 118 insertions(+), 11 deletions(-) diff --git a/src/constants.hpp b/src/constants.hpp index c243805..b45a723 100644 --- a/src/constants.hpp +++ b/src/constants.hpp @@ -15,8 +15,10 @@ See the Apache License Version 2.0 for the specific language governing permissio #define CONSTANTS_H #include +#include using std::string; +using std::set; namespace snowplow { const string SNOWPLOW_TRACKER_VERSION_LABEL = "cpp-0.2.0"; @@ -58,6 +60,7 @@ const string SNOWPLOW_TRACKER_VERSION = "tv"; const string SNOWPLOW_APP_ID = "aid"; const string SNOWPLOW_SP_NAMESPACE = "tna"; const string SNOWPLOW_PLATFORM = "p"; +const set SNOWPLOW_FAIL_NO_RETRY_HTTP_STATUS_CODES = {400, 401, 403, 410, 422}; // subject class const string SNOWPLOW_UID = "uid"; diff --git a/src/emitter.cpp b/src/emitter.cpp index f6513fc..d016300 100644 --- a/src/emitter.cpp +++ b/src/emitter.cpp @@ -148,7 +148,7 @@ void Emitter::run() { auto res_row_ids = result.get_row_ids(); if (result.is_success()) { success_row_ids.splice(success_row_ids.end(), res_row_ids); - } else if (result.should_retry()) { + } else if (result.should_retry(m_custom_retry_for_status_codes)) { failed_will_retry_row_ids.splice(failed_will_retry_row_ids.end(), res_row_ids); } else { failed_wont_retry_row_ids.splice(failed_wont_retry_row_ids.end(), res_row_ids); @@ -317,3 +317,15 @@ void 
Emitter::set_request_callback(const EmitterCallback &callback, EmitStatus e m_callback_emit_status = emit_status; m_callback = callback; } + +void Emitter::set_custom_retry_for_status_code(int http_status_code, bool retry) { + lock_guard guard(this->m_run_check); + if (m_running) { + throw std::logic_error("Not allowed when Emitter is running"); + } + if (http_status_code < 300) { + throw std::invalid_argument("Retry rules can only be set for status codes >= 300"); + } + + m_custom_retry_for_status_codes.insert({http_status_code, retry}); +} diff --git a/src/emitter.hpp b/src/emitter.hpp index 547e701..63222d6 100644 --- a/src/emitter.hpp +++ b/src/emitter.hpp @@ -197,6 +197,17 @@ class Emitter { */ void set_request_callback(const EmitterCallback &callback, EmitStatus emit_status); + /** + * @brief Set a custom retry rule for when the HTTP status code is received in emit response from Collector. + * + * This overrides default behavior for HTTP status codes greater than 300. + * The custom retry rules can't be changed when the Emitter is running. 
+ * + * @param http_status_code HTTP status code + * @param retry Whether events should be retried or not + */ + void set_custom_retry_for_status_code(int http_status_code, bool retry); + private: CrackedUrl m_url; Method m_method; @@ -214,6 +225,7 @@ class Emitter { bool m_running; EmitterCallback m_callback; EmitStatus m_callback_emit_status; + map m_custom_retry_for_status_codes; void run(); void do_send(const list &event_rows, list *results); diff --git a/src/http/http_request_result.cpp b/src/http/http_request_result.cpp index 5ae55ad..8b14869 100644 --- a/src/http/http_request_result.cpp +++ b/src/http/http_request_result.cpp @@ -49,7 +49,7 @@ bool HttpRequestResult::is_success() const { return (get_http_response_code() >= 200 && get_http_response_code() < 300); } -bool HttpRequestResult::should_retry() const { +bool HttpRequestResult::should_retry(const map &custom_retry_for_status_codes) const { // don't retry if successful if (is_success()) { return false; @@ -60,5 +60,17 @@ bool HttpRequestResult::should_retry() const { return false; } - return true; + // retry if it was an internal error + if (is_internal_error()) { + return true; + } + + // status code has a custom retry rule + auto it = custom_retry_for_status_codes.find(get_http_response_code()); + if (it != custom_retry_for_status_codes.end()) { + return it->second; + } + + // retry if status code is not in the list of no-retry status codes + return SNOWPLOW_FAIL_NO_RETRY_HTTP_STATUS_CODES.find(get_http_response_code()) == SNOWPLOW_FAIL_NO_RETRY_HTTP_STATUS_CODES.end(); } diff --git a/src/http/http_request_result.hpp b/src/http/http_request_result.hpp index 1b67545..a789214 100644 --- a/src/http/http_request_result.hpp +++ b/src/http/http_request_result.hpp @@ -32,7 +32,7 @@ class HttpRequestResult { int get_http_response_code() const; list get_row_ids() const; bool is_success() const; - bool should_retry() const; + bool should_retry(const map &custom_retry_for_status_codes) const; private: bool 
is_internal_error() const; diff --git a/test/emitter_test.cpp b/test/emitter_test.cpp index d79ab36..131d61e 100644 --- a/test/emitter_test.cpp +++ b/test/emitter_test.cpp @@ -256,6 +256,15 @@ TEST_CASE("emitter") { REQUIRE(EmitStatus::SUCCESS == std::get<1>(calls[1])); calls.clear(); TestHttpClient::reset(); + + // calls for failed with no retry event + TestHttpClient::set_temporary_response_code(422); + event_id = track_sample_event(emitter); + sleep_for(milliseconds(500)); + REQUIRE(1 == calls.size()); + REQUIRE(event_id == std::get<0>(calls[0]).front()); + REQUIRE(EmitStatus::FAILED_WONT_RETRY == std::get<1>(calls[0])); + TestHttpClient::reset(); } SECTION("doesn't trigger callbacks for not subscribed emit statuses") { @@ -265,7 +274,7 @@ TEST_CASE("emitter") { [&](list event_ids, EmitStatus status) { calls.push_back(make_tuple(event_ids, status)); }, - EmitStatus::FAILED_WILL_RETRY); + EmitStatus::FAILED_WONT_RETRY); // doesn't call for successful event TestHttpClient::set_http_response_code(200); @@ -276,12 +285,52 @@ TEST_CASE("emitter") { // doesn't call for failed with retry event TestHttpClient::set_temporary_response_code(500); + track_sample_event(emitter); + sleep_for(milliseconds(500)); + REQUIRE(0 == calls.size()); + TestHttpClient::reset(); + + // calls for failed with no retry event + TestHttpClient::set_temporary_response_code(422); string event_id = track_sample_event(emitter); sleep_for(milliseconds(500)); REQUIRE(1 == calls.size()); - REQUIRE(1 == calls.size()); REQUIRE(event_id == std::get<0>(calls[0]).front()); - REQUIRE(EmitStatus::FAILED_WILL_RETRY == std::get<1>(calls[0])); + REQUIRE(EmitStatus::FAILED_WONT_RETRY == std::get<1>(calls[0])); + TestHttpClient::reset(); + } + + SECTION("Emitter should not retry failed events for no-retry status codes") { + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + + 
TestHttpClient::set_http_response_code(200); // success, don't retry + track_sample_event(emitter); + REQUIRE(1 == TestHttpClient::get_requests_list().size()); + TestHttpClient::reset(); + + TestHttpClient::set_temporary_response_code(501); // retry + track_sample_event(emitter); + REQUIRE(2 == TestHttpClient::get_requests_list().size()); TestHttpClient::reset(); + + TestHttpClient::set_temporary_response_code(422); // don't retry for this code + track_sample_event(emitter); + REQUIRE(1 == TestHttpClient::get_requests_list().size()); + TestHttpClient::reset(); + + emitter.set_custom_retry_for_status_code(501, false); + emitter.set_custom_retry_for_status_code(422, true); + + TestHttpClient::set_temporary_response_code(501); // don't retry + track_sample_event(emitter); + REQUIRE(1 == TestHttpClient::get_requests_list().size()); + TestHttpClient::reset(); + + TestHttpClient::set_temporary_response_code(422); // retry + track_sample_event(emitter); + REQUIRE(2 == TestHttpClient::get_requests_list().size()); + TestHttpClient::reset(); + + emitter.stop(); } } diff --git a/test/http/http_request_result_test.cpp b/test/http/http_request_result_test.cpp index fe77757..8b1b9aa 100644 --- a/test/http/http_request_result_test.cpp +++ b/test/http/http_request_result_test.cpp @@ -36,25 +36,44 @@ TEST_CASE("http_request_result") { SECTION("should not retry if success") { HttpRequestResult httpRequestResult(0, 200, list(), true); REQUIRE(httpRequestResult.is_success() == true); - REQUIRE(httpRequestResult.should_retry() == false); + REQUIRE(httpRequestResult.should_retry(map()) == false); } SECTION("should not retry if oversized") { HttpRequestResult httpRequestResult(0, 500, list(), true); REQUIRE(httpRequestResult.is_success() == false); - REQUIRE(httpRequestResult.should_retry() == false); + REQUIRE(httpRequestResult.should_retry(map()) == false); } SECTION("should retry for internal errors") { HttpRequestResult httpRequestResult(1, 200, list(), false); 
REQUIRE(httpRequestResult.is_success() == false); - REQUIRE(httpRequestResult.should_retry() == true); + REQUIRE(httpRequestResult.should_retry(map()) == true); } SECTION("should retry for 5xx status codes") { HttpRequestResult httpRequestResult(0, 501, list(), false); REQUIRE(httpRequestResult.is_success() == false); - REQUIRE(httpRequestResult.should_retry() == true); + REQUIRE(httpRequestResult.should_retry(map()) == true); + } + + SECTION("should not retry for no-retry status codes") { + HttpRequestResult httpRequestResult(0, 422, list(), false); + REQUIRE(httpRequestResult.get_http_response_code() == 422); + REQUIRE(httpRequestResult.is_success() == false); + REQUIRE(httpRequestResult.should_retry(map()) == false); + } + + SECTION("should retry according to custom status code rules") { + map custom_rules; + custom_rules.insert({501, false}); + custom_rules.insert({422, true}); + + HttpRequestResult httpRequestResult1(0, 501, list(), false); + REQUIRE(httpRequestResult1.should_retry(custom_rules) == false); + + HttpRequestResult httpRequestResult2(0, 422, list(), false); + REQUIRE(httpRequestResult2.should_retry(custom_rules) == true); } SECTION("the default constructor should return nothing for getter functions") { From b45f52fa9c370a5b02fa91d8f90dad67cc8ec114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Wed, 20 Apr 2022 15:17:27 +0200 Subject: [PATCH 09/35] Expose an interface for storage and make it configurable (close #48) PR #67 --- README.md | 6 +- examples/main.cpp | 6 +- performance/logs.txt | 1 + performance/mock_client_session.hpp | 5 +- performance/mock_emitter.hpp | 5 +- performance/mute_emitter.hpp | 5 +- performance/run.cpp | 39 +++---- snowplow-cpp-tracker-example.vcxproj | 6 +- snowplow-cpp-tracker-example.vcxproj.filters | 10 +- snowplow-cpp-tracker.vcxproj | 8 +- snowplow-cpp-tracker.vcxproj.filters | 12 +- src/client_session.cpp | 27 ++--- src/client_session.hpp | 7 +- src/emitter.cpp | 20 ++-- src/emitter.hpp | 
22 ++-- src/snowplow.hpp | 8 +- src/storage/event_row.hpp | 26 +++++ src/storage/event_store.hpp | 53 +++++++++ src/storage/session_store.hpp | 52 +++++++++ .../sqlite_storage.cpp} | 73 +++++------- .../sqlite_storage.hpp} | 64 +++++----- src/utils.cpp | 12 +- src/utils.hpp | 7 +- test/client_session_test.cpp | 34 +++--- test/emitter_test.cpp | 54 +++++---- .../sqlite_storage_test.cpp} | 110 +++++++----------- test/tracker_test.cpp | 28 ++--- test/utils_test.cpp | 15 +-- 28 files changed, 411 insertions(+), 304 deletions(-) create mode 100644 src/storage/event_row.hpp create mode 100644 src/storage/event_store.hpp create mode 100644 src/storage/session_store.hpp rename src/{storage.cpp => storage/sqlite_storage.cpp} (86%) rename src/{storage.hpp => storage/sqlite_storage.hpp} (51%) rename test/{storage_test.cpp => storage/sqlite_storage_test.cpp} (50%) diff --git a/README.md b/README.md index a8d6c94..159c994 100644 --- a/README.md +++ b/README.md @@ -34,13 +34,15 @@ Import using the `snowplow.hpp` header file and initialize the tracker with your using namespace snowplow; +// Storage for events to be sent and current session +auto storage = std::make_shared("sp.db"); // Emitter is responsible for sending events to a Snowplow Collector -Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, "sp.db"); +Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, storage); // Subject defines additional information about your application's environment and user Subject subject; subject.set_user_id("a-user-id"); // Client session keeps track of user sessions -ClientSession client_session("sp.db", 5000, 5000); +ClientSession client_session(storage, 5000, 5000); string platform = "pc"; // platform the tracker is running on string app_id = "openage"; // application ID diff --git a/examples/main.cpp b/examples/main.cpp index a4fdced..37c6440 100644 --- a/examples/main.cpp +++ 
b/examples/main.cpp @@ -12,6 +12,7 @@ using snowplow::Tracker; using snowplow::StructuredEvent; using snowplow::ScreenViewEvent; using snowplow::TimingEvent; +using snowplow::SqliteStorage; using std::cout; using std::endl; using std::string; @@ -30,7 +31,8 @@ int main(int argc, char **argv) { string uri = argv[1]; string db_name = "demo.db"; - Emitter emitter(uri, Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name); + auto storage = std::make_shared(db_name); + Emitter emitter(uri, Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, storage); emitter.set_request_callback( [](list event_ids, EmitStatus emit_status) { switch (emit_status) { @@ -56,7 +58,7 @@ int main(int argc, char **argv) { subject.set_language("EN"); subject.set_useragent("Mozilla/5.0"); - ClientSession client_session(db_name, 5000, 5000); + ClientSession client_session(storage, 5000, 5000); string platform = "mob"; string app_id = "app-id"; diff --git a/performance/logs.txt b/performance/logs.txt index c9da94a..428f5e0 100644 --- a/performance/logs.txt +++ b/performance/logs.txt @@ -7,3 +7,4 @@ {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.493122875,"mocked_emitter_and_real_session":6.679922959,"mute_emitter_and_mocked_session":18.915125958,"mute_emitter_and_real_session":17.245701792,"num_operations":10000,"num_threads":5},"timestamp":1648794323772,"tracker_version":"cpp-0.2.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple 
Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.519524667,"mocked_emitter_and_real_session":6.452258541,"mute_emitter_and_mocked_session":21.564418917,"mute_emitter_and_real_session":18.91465825,"num_operations":10000,"num_threads":5},"timestamp":1649249418985,"tracker_version":"cpp-0.2.0"} {"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.459520667,"mocked_emitter_and_real_session":6.436076,"mute_emitter_and_mocked_session":21.589996458,"mute_emitter_and_real_session":19.414266,"num_operations":10000,"num_threads":5},"timestamp":1649861294219,"tracker_version":"cpp-0.2.0"} +{"desktop_context":{"data":{"deviceManufacturer":"Apple Inc.","deviceModel":"MacBookPro17,1","deviceProcessorCount":8,"osIs64Bit":true,"osServicePack":"","osType":"macOS","osVersion":"12.2.0"},"schema":"iglu:com.snowplowanalytics.snowplow/desktop_context/jsonschema/1-0-0"},"results":{"mocked_emitter_and_mocked_session":6.799446,"mocked_emitter_and_real_session":6.430896542,"mute_emitter_and_mocked_session":18.495574375,"mute_emitter_and_real_session":17.518208791,"num_operations":10000,"num_threads":5},"timestamp":1650393809593,"tracker_version":"cpp-0.2.0"} diff --git a/performance/mock_client_session.hpp b/performance/mock_client_session.hpp index f14a5c8..9992692 100644 --- a/performance/mock_client_session.hpp +++ b/performance/mock_client_session.hpp @@ -18,11 +18,14 @@ See the Apache License Version 2.0 for the specific language governing permissio using snowplow::ClientSession; using 
snowplow::SelfDescribingJson; +using snowplow::SessionStore; using std::string; +using std::shared_ptr; +using std::move; class MockClientSession : public ClientSession { public: - MockClientSession(const string &db_name) : ClientSession(db_name, 5000, 5000) {} + MockClientSession(shared_ptr session_store) : ClientSession(move(session_store), 5000, 5000) {} void set_is_background(bool is_background) {} bool get_is_background() { return false; } diff --git a/performance/mock_emitter.hpp b/performance/mock_emitter.hpp index d227911..d0fb059 100644 --- a/performance/mock_emitter.hpp +++ b/performance/mock_emitter.hpp @@ -18,11 +18,14 @@ See the Apache License Version 2.0 for the specific language governing permissio using snowplow::Emitter; using snowplow::Payload; +using snowplow::EventStore; using std::string; +using std::shared_ptr; +using std::move; class MockEmitter : public Emitter { public: - MockEmitter(const string &db_name) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name) {} + MockEmitter(shared_ptr event_store) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, move(event_store)) {} void start() {} void stop() {} diff --git a/performance/mute_emitter.hpp b/performance/mute_emitter.hpp index ee62f96..d17717c 100644 --- a/performance/mute_emitter.hpp +++ b/performance/mute_emitter.hpp @@ -17,11 +17,14 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../src/snowplow.hpp" using snowplow::Emitter; +using snowplow::EventStore; using std::string; +using std::shared_ptr; +using std::move; class MuteEmitter : public Emitter { public: - MuteEmitter(const string &db_name) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, db_name) {} + MuteEmitter(shared_ptr event_store) : Emitter("127.0.0.1:9090", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 52000, event_store) {} 
void start() {} void stop() {} diff --git a/performance/run.cpp b/performance/run.cpp index 7fc106c..1b392c6 100644 --- a/performance/run.cpp +++ b/performance/run.cpp @@ -22,17 +22,17 @@ See the Apache License Version 2.0 for the specific language governing permissio using snowplow::ClientSession; using snowplow::Emitter; -using snowplow::Storage; using snowplow::Subject; using snowplow::Tracker; using snowplow::ScreenViewEvent; using snowplow::StructuredEvent; using snowplow::TimingEvent; +using snowplow::SqliteStorage; using std::vector; using std::chrono::duration; using std::chrono::high_resolution_clock; -void clear_storage(const string &db_name); +void clear_storage(shared_ptr &db_name); void track_events() { Tracker *tracker = Tracker::instance(); @@ -89,42 +89,41 @@ double run(Emitter &emitter, ClientSession &client_session) { } double run_mocked_emitter_and_mocked_session(const string &db_name) { - MockEmitter emitter(db_name); - MockClientSession client_session(db_name); + auto storage = std::make_shared(db_name); + MockEmitter emitter(storage); + MockClientSession client_session(storage); double time = run(emitter, client_session); - Storage::close(); return time; } double run_mocked_emitter_and_real_session(const string &db_name) { - MockEmitter emitter(db_name); - ClientSession client_session(db_name, 5000, 5000); - clear_storage(db_name); + auto storage = std::make_shared(db_name); + MockEmitter emitter(storage); + ClientSession client_session(storage, 5000, 5000); + clear_storage(storage); double time = run(emitter, client_session); - Storage::close(); return time; } double run_mute_emitter_and_mocked_session(const string &db_name) { - MuteEmitter emitter(db_name); - MockClientSession client_session(db_name); - clear_storage(db_name); + auto storage = std::make_shared(db_name); + MuteEmitter emitter(storage); + MockClientSession client_session(storage); + clear_storage(storage); double time = run(emitter, client_session); - Storage::close(); return 
time; } double run_mute_emitter_and_real_session(const string &db_name) { - MuteEmitter emitter(db_name); - ClientSession client_session(db_name, 5000, 5000); - clear_storage(db_name); + auto storage = std::make_shared(db_name); + MuteEmitter emitter(storage); + ClientSession client_session(storage, 5000, 5000); + clear_storage(storage); double time = run(emitter, client_session); - Storage::close(); return time; } -void clear_storage(const string &db_name) { - Storage *storage = Storage::init(db_name); +void clear_storage(shared_ptr &storage) { storage->delete_all_event_rows(); - storage->delete_all_session_rows(); + storage->delete_session(); } diff --git a/snowplow-cpp-tracker-example.vcxproj b/snowplow-cpp-tracker-example.vcxproj index 67094cc..19c5b8a 100644 --- a/snowplow-cpp-tracker-example.vcxproj +++ b/snowplow-cpp-tracker-example.vcxproj @@ -113,7 +113,7 @@ - + @@ -137,7 +137,9 @@ - + + + diff --git a/snowplow-cpp-tracker-example.vcxproj.filters b/snowplow-cpp-tracker-example.vcxproj.filters index 3a0959b..cfe6dc9 100644 --- a/snowplow-cpp-tracker-example.vcxproj.filters +++ b/snowplow-cpp-tracker-example.vcxproj.filters @@ -54,7 +54,7 @@ Source Files - + Source Files @@ -122,7 +122,13 @@ Source Files - + + Header Files + + + Header Files + + Header Files diff --git a/snowplow-cpp-tracker.vcxproj b/snowplow-cpp-tracker.vcxproj index 6b2e992..37a7d3c 100644 --- a/snowplow-cpp-tracker.vcxproj +++ b/snowplow-cpp-tracker.vcxproj @@ -117,7 +117,7 @@ - + @@ -131,7 +131,7 @@ - + @@ -155,7 +155,9 @@ - + + + diff --git a/snowplow-cpp-tracker.vcxproj.filters b/snowplow-cpp-tracker.vcxproj.filters index 865f56c..6464b58 100644 --- a/snowplow-cpp-tracker.vcxproj.filters +++ b/snowplow-cpp-tracker.vcxproj.filters @@ -54,7 +54,7 @@ Source Files - + Source Files @@ -102,7 +102,7 @@ Source Files - + Source Files @@ -161,7 +161,13 @@ Source Files - + + Header Files + + + Header Files + + Header Files diff --git a/src/client_session.cpp b/src/client_session.cpp index 
b980ab2..b777722 100644 --- a/src/client_session.cpp +++ b/src/client_session.cpp @@ -13,15 +13,16 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "client_session.hpp" #include "constants.hpp" -#include "storage.hpp" #include "utils.hpp" using namespace snowplow; using std::lock_guard; using std::unique_lock; +using std::shared_ptr; +using std::unique_ptr; -ClientSession::ClientSession(const string &db_name, unsigned long long foreground_timeout, unsigned long long background_timeout) { - Storage::init(db_name); +ClientSession::ClientSession(shared_ptr session_store, unsigned long long foreground_timeout, unsigned long long background_timeout) { + this->m_session_store = std::move(session_store); this->m_foreground_timeout = foreground_timeout; this->m_background_timeout = background_timeout; @@ -30,21 +31,18 @@ ClientSession::ClientSession(const string &db_name, unsigned long long foregroun this->m_is_new_session = true; // Check for existing session - list *session_rows = new list; - Storage::instance()->select_all_session_rows(session_rows); + auto session = m_session_store->get_session(); - if (session_rows->size() == 1) { + if (session) { try { - json session_context = session_rows->front(); - - this->m_user_id = session_context[SNOWPLOW_SESSION_USER_ID].get(); - this->m_current_session_id = session_context[SNOWPLOW_SESSION_ID].get(); - this->m_session_index = session_context[SNOWPLOW_SESSION_INDEX].get(); + this->m_user_id = (*session)[SNOWPLOW_SESSION_USER_ID].get(); + this->m_current_session_id = (*session)[SNOWPLOW_SESSION_ID].get(); + this->m_session_index = (*session)[SNOWPLOW_SESSION_INDEX].get(); } catch (...) 
{ this->m_user_id = Utils::get_uuid4(); this->m_current_session_id = ""; this->m_session_index = 0; - Storage::instance()->delete_all_session_rows(); + m_session_store->delete_session(); } } else { this->m_user_id = Utils::get_uuid4(); @@ -52,9 +50,6 @@ ClientSession::ClientSession(const string &db_name, unsigned long long foregroun this->m_session_index = 0; } this->update_last_session_check_at(); - - session_rows->clear(); - delete (session_rows); } // --- Public @@ -79,7 +74,7 @@ SelfDescribingJson ClientSession::update_and_get_session_context(const string &e } if (save_to_storage) { - Storage::instance()->insert_update_session(session_context_data); + m_session_store->set_session(session_context_data); } SelfDescribingJson sdj(SNOWPLOW_SCHEMA_CLIENT_SESSION, session_context_data); return sdj; diff --git a/src/client_session.hpp b/src/client_session.hpp index 319837e..f8a29bf 100644 --- a/src/client_session.hpp +++ b/src/client_session.hpp @@ -18,9 +18,11 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include "payload/self_describing_json.hpp" #include "../include/json.hpp" +#include "storage/session_store.hpp" using std::string; using std::mutex; +using std::shared_ptr; using json = nlohmann::json; namespace snowplow { @@ -37,11 +39,11 @@ class ClientSession { /** * @brief Construct a new Client Session object * - * @param db_name Path to the SQLite database where session data will be read and stored (must be the same as for Emitter) + * @param session_store Defines the database where session data will be read and stored * @param foreground_timeout Timeout in ms for updating the session when the app is in background * @param background_timeout Timeout in ms for updating the session when the app is in foreground */ - ClientSession(const string & db_name, unsigned long long foreground_timeout, unsigned long long background_timeout); + ClientSession(shared_ptr session_store, unsigned long long foreground_timeout, 
unsigned long long background_timeout); /** * @brief Forces a new session to be started when next event is tracked. @@ -84,6 +86,7 @@ class ClientSession { private: // Constructor + shared_ptr m_session_store; unsigned long long m_foreground_timeout; unsigned long long m_background_timeout; diff --git a/src/emitter.cpp b/src/emitter.cpp index d016300..af8f883 100644 --- a/src/emitter.cpp +++ b/src/emitter.cpp @@ -18,6 +18,7 @@ using std::invalid_argument; using std::lock_guard; using std::stringstream; using std::unique_lock; +using std::shared_ptr; using std::unique_ptr; using std::async; using std::to_string; @@ -47,13 +48,11 @@ unique_ptr createDefaultHttpClient() { #endif Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_limit, - int byte_limit_post, int byte_limit_get, const string &db_name) : Emitter(uri, method, protocol, send_limit, byte_limit_post, byte_limit_get, db_name, createDefaultHttpClient()) { + int byte_limit_post, int byte_limit_get, shared_ptr event_store) : Emitter(uri, method, protocol, send_limit, byte_limit_post, byte_limit_get, std::move(event_store), createDefaultHttpClient()) { } Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_limit, - int byte_limit_post, int byte_limit_get, const string &db_name, unique_ptr http_client) : m_url(this->get_collector_url(uri, protocol, method)) { - - Storage::init(db_name); + int byte_limit_post, int byte_limit_get, shared_ptr event_store, unique_ptr http_client) : m_url(this->get_collector_url(uri, protocol, method)) { if (uri == "") { throw invalid_argument("FATAL: Emitter URI cannot be empty."); @@ -78,6 +77,7 @@ Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_l this->m_send_limit = send_limit; this->m_byte_limit_post = byte_limit_post; this->m_byte_limit_get = byte_limit_get; + this->m_event_store = move(event_store); this->m_http_client = move(http_client); } @@ -108,7 +108,7 @@ void Emitter::stop() { } void 
Emitter::add(Payload payload) { - Storage::instance()->insert_payload(payload); + m_event_store->add_event(payload); this->m_check_db.notify_all(); } @@ -132,8 +132,8 @@ void Emitter::flush() { void Emitter::run() { do { - list event_rows; - Storage::instance()->select_event_row_range(&event_rows, m_send_limit); + list event_rows; + m_event_store->get_event_rows_batch(&event_rows, m_send_limit); if (event_rows.size() > 0) { // emit the events @@ -162,7 +162,7 @@ void Emitter::run() { list delete_row_ids; delete_row_ids.splice(delete_row_ids.end(), success_row_ids); delete_row_ids.splice(delete_row_ids.end(), failed_wont_retry_row_ids); - Storage::instance()->delete_event_row_ids(&delete_row_ids); + m_event_store->delete_event_rows_with_ids(delete_row_ids); } else { m_check_fin.notify_all(); @@ -173,7 +173,7 @@ void Emitter::run() { } while (is_running()); } -void Emitter::do_send(const list &event_rows, list *results) { +void Emitter::do_send(const list &event_rows, list *results) { list> request_futures; // Send each request in its own thread @@ -231,7 +231,7 @@ void Emitter::do_send(const list &event_rows, list &success_row_ids, const list &failed_will_retry_row_ids, const list &failed_wont_retry_row_ids, const list &event_rows) const { +void Emitter::trigger_callbacks(const list &success_row_ids, const list &failed_will_retry_row_ids, const list &failed_wont_retry_row_ids, const list &event_rows) const { if (!m_callback) { return; } diff --git a/src/emitter.hpp b/src/emitter.hpp index 63222d6..b0161c5 100644 --- a/src/emitter.hpp +++ b/src/emitter.hpp @@ -22,7 +22,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include "constants.hpp" #include "utils.hpp" -#include "storage.hpp" +#include "storage/event_store.hpp" #include "payload/payload.hpp" #include "payload/self_describing_json.hpp" #include "cracked_url.hpp" @@ -35,6 +35,7 @@ using std::condition_variable; using std::mutex; using std::unique_ptr; using 
std::list; +using std::shared_ptr; namespace snowplow { @@ -87,25 +88,19 @@ class Emitter { /** * @brief Construct a new Emitter object * - * The `db_name` can be any valid path on your host file system (that can be created with the current user). - * By default it will create the required files wherever the application is being run from. - * * @param uri The URI to send events to * @param method The request type to use (GET or POST) * @param protocol The protocol to use (http or https) * @param send_limit The maximum amount of events to send at a time * @param byte_limit_post The byte limit when sending a POST request * @param byte_limit_get The byte limit when sending a GET request - * @param db_name Defines the path and file name of the database + * @param event_store Defines the database to use for event queue */ Emitter(const string & uri, Method method, Protocol protocol, int send_limit, - int byte_limit_post, int byte_limit_get, const string & db_name); + int byte_limit_post, int byte_limit_get, shared_ptr event_store); /** * @brief Construct a new Emitter object with a custom HTTP client - * - * The `db_name` can be any valid path on your host file system (that can be created with the current user). - * By default it will create the required files wherever the application is being run from. 
* * @param uri The URI to send events to * @param method The request type to use (GET or POST) @@ -113,11 +108,11 @@ class Emitter { * @param send_limit The maximum amount of events to send at a time * @param byte_limit_post The byte limit when sending a POST request * @param byte_limit_get The byte limit when sending a GET request - * @param db_name Defines the path and file name of the database + * @param event_store Defines the database to use for event queue * @param http_client Unique pointer to a custom HTTP client to send GET and POST requests with */ Emitter(const string & uri, Method method, Protocol protocol, int send_limit, - int byte_limit_post, int byte_limit_get, const string & db_name, unique_ptr http_client); + int byte_limit_post, int byte_limit_get, shared_ptr event_store, unique_ptr http_client); ~Emitter(); /** @@ -211,6 +206,7 @@ class Emitter { private: CrackedUrl m_url; Method m_method; + shared_ptr m_event_store; unique_ptr m_http_client; unsigned int m_send_limit; unsigned int m_byte_limit_get; @@ -228,10 +224,10 @@ class Emitter { map m_custom_retry_for_status_codes; void run(); - void do_send(const list &event_rows, list *results); + void do_send(const list &event_rows, list *results); string build_post_data_json(list payload_list); string get_collector_url(const string &uri, Protocol protocol, Method method) const; - void trigger_callbacks(const list &success_row_ids, const list &failed_will_retry_row_ids, const list &failed_wont_retry_row_ids, const list &event_rows) const; + void trigger_callbacks(const list &success_row_ids, const list &failed_will_retry_row_ids, const list &failed_wont_retry_row_ids, const list &event_rows) const; void execute_callback(const list &event_ids, EmitStatus emit_status) const; }; } // namespace snowplow diff --git a/src/snowplow.hpp b/src/snowplow.hpp index cb2a970..95b0129 100644 --- a/src/snowplow.hpp +++ b/src/snowplow.hpp @@ -20,10 +20,15 @@ See the Apache License Version 2.0 for the specific language 
governing permissio #include "client_session.hpp" #include "emitter.hpp" -#include "storage.hpp" #include "subject.hpp" #include "tracker.hpp" +// storage +#include "storage/event_row.hpp" +#include "storage/event_store.hpp" +#include "storage/session_store.hpp" +#include "storage/sqlite_storage.hpp" + // http #include "http/http_client.hpp" #include "http/http_client_apple.hpp" @@ -31,6 +36,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "http/http_client_windows.hpp" // payload +#include "payload/payload.hpp" #include "payload/self_describing_json.hpp" // events diff --git a/src/storage/event_row.hpp b/src/storage/event_row.hpp new file mode 100644 index 0000000..8e4306f --- /dev/null +++ b/src/storage/event_row.hpp @@ -0,0 +1,26 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef EVENT_ROW_H +#define EVENT_ROW_H + +#include "../payload/payload.hpp" + +namespace snowplow { +struct EventRow { + int id; + Payload event; +}; +} // namespace snowplow + +#endif diff --git a/src/storage/event_store.hpp b/src/storage/event_store.hpp new file mode 100644 index 0000000..0a64374 --- /dev/null +++ b/src/storage/event_store.hpp @@ -0,0 +1,53 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. 
+ +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef EVENT_STORE_H +#define EVENT_STORE_H + +#include "event_row.hpp" +#include + +using std::list; + +namespace snowplow { +/** + * @brief Storage interface used by the Emitter to store and access events. + * + * You may implement your own storage or make use of the `SqliteStorage` provided by the tracker. + */ +struct EventStore { + /** + * @brief Insert event payload into event queue. + * + * @param payload Event payload to store + */ + virtual void add_event(const Payload &payload) = 0; + + /** + * @brief Retrieve event rows from event queue up to the given limit. + * + * @param event_list Output event list to add event rows to + * @param number_to_get Maximum number of events to retrieve + */ + virtual void get_event_rows_batch(list *event_list, int number_to_get) = 0; + + /** + * @brief Remove event rows with the given IDs. + * + * @param id_list List of event row IDs to remove + */ + virtual void delete_event_rows_with_ids(const list &id_list) = 0; +}; +} // namespace snowplow + +#endif diff --git a/src/storage/session_store.hpp b/src/storage/session_store.hpp new file mode 100644 index 0000000..f227978 --- /dev/null +++ b/src/storage/session_store.hpp @@ -0,0 +1,52 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. 
+ +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef SESSION_STORE_H +#define SESSION_STORE_H + +#include "../../include/json.hpp" +#include + +using std::list; +using std::unique_ptr; +using json = nlohmann::json; + +namespace snowplow { +/** + * @brief Storage interface used by the ClientSession to store and access sessions. + * + * You may implement your own storage or make use of the `SqliteStorage` provided by the tracker. + */ +struct SessionStore { + /** + * @brief Return the current session. + * + * @return Pointer to session or nullptr if it doesn't exist + */ + virtual unique_ptr get_session() = 0; + + /** + * @brief Insert or replace the session. + * + * @param session_data Data to insert + */ + virtual void set_session(const json &session_data) = 0; + + /** + * @brief Remove the current session. + */ + virtual void delete_session() = 0; +}; +} // namespace snowplow + +#endif diff --git a/src/storage.cpp b/src/storage/sqlite_storage.cpp similarity index 86% rename from src/storage.cpp rename to src/storage/sqlite_storage.cpp index c5dfc85..e560cdc 100644 --- a/src/storage.cpp +++ b/src/storage/sqlite_storage.cpp @@ -11,7 +11,10 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
*/ -#include "storage.hpp" +#include "sqlite_storage.hpp" + +#include +#include "../utils.hpp" using namespace snowplow; using std::cerr; @@ -29,38 +32,9 @@ const string db_table_session = "sessions"; const string db_column_session_id = "id"; const string db_column_session_data = "data"; -// --- Static Singleton Access - -Storage *Storage::m_instance = 0; -mutex Storage::m_db_get; - -Storage *Storage::init(const string &db_name) { - lock_guard guard(m_db_get); - if (!m_instance) { - m_instance = new Storage(db_name); - } - return m_instance; -} - -Storage *Storage::instance() { - lock_guard guard(m_db_get); - if (!m_instance) { - throw runtime_error("FATAL: Storage must be initialized first."); - } - return m_instance; -} - -void Storage::close() { - lock_guard guard(m_db_get); - if (m_instance) { - delete (m_instance); - } - m_instance = 0; -} - // --- Constructor & Destructor -Storage::Storage(const string &db_name) { +SqliteStorage::SqliteStorage(const string &db_name) { sqlite3 *db; char *err_msg = 0; int rc; @@ -127,14 +101,14 @@ Storage::Storage(const string &db_name) { } } -Storage::~Storage() { +SqliteStorage::~SqliteStorage() { sqlite3_finalize(this->m_add_stmt); sqlite3_close(this->m_db); } // --- INSERT -void Storage::insert_payload(Payload payload) { +void SqliteStorage::add_event(const Payload &payload) { lock_guard guard(this->m_db_access); int rc; @@ -160,7 +134,7 @@ void Storage::insert_payload(Payload payload) { } } -void Storage::insert_update_session(json session_data) { +void SqliteStorage::set_session(const json &session_data) { lock_guard guard(this->m_db_access); int rc; @@ -209,7 +183,7 @@ void Storage::insert_update_session(json session_data) { static int select_event_callback(void *data, int argc, char **argv, char **az_col_name) { int i, id = 0; - list *data_list = (list *)data; + list *data_list = (list *)data; Payload event; for (i = 0; i < argc; i++) { @@ -220,14 +194,14 @@ static int select_event_callback(void *data, int argc, char 
**argv, char **az_co } } - Storage::EventRow event_row; + EventRow event_row; event_row.id = id; event_row.event = event; data_list->push_back(event_row); return 0; } -void Storage::select_all_event_rows(list *event_list) { +void SqliteStorage::get_all_event_rows(list *event_list) { lock_guard guard(this->m_db_access); int rc; @@ -243,7 +217,7 @@ void Storage::select_all_event_rows(list *event_list) { } } -void Storage::select_event_row_range(list *event_list, int range) { +void SqliteStorage::get_event_rows_batch(list *event_list, int number_to_get) { lock_guard guard(this->m_db_access); int rc; @@ -251,7 +225,7 @@ void Storage::select_event_row_range(list *event_list, int ra string select_range_query = "SELECT * FROM " + db_table_events + " " + - "ORDER BY " + db_column_events_id + " ASC LIMIT " + std::to_string(range) + ";"; + "ORDER BY " + db_column_events_id + " ASC LIMIT " + std::to_string(number_to_get) + ";"; rc = sqlite3_exec(this->m_db, (const char *)select_range_query.c_str(), select_event_callback, (void *)event_list, &err_msg); if (rc != SQLITE_OK) { @@ -276,25 +250,30 @@ static int select_session_callback(void *data, int argc, char **argv, char **az_ return 0; } -void Storage::select_all_session_rows(list *session_list) { +unique_ptr SqliteStorage::get_session() { lock_guard guard(this->m_db_access); - + list session_list; int rc; char *err_msg = 0; string select_all_query = - "SELECT * FROM " + db_table_session + ";"; + "SELECT * FROM " + db_table_session + " WHERE " + db_column_session_id + " = 1;"; - rc = sqlite3_exec(this->m_db, (const char *)select_all_query.c_str(), select_session_callback, (void *)session_list, &err_msg); + rc = sqlite3_exec(this->m_db, (const char *)select_all_query.c_str(), select_session_callback, (void *)&session_list, &err_msg); if (rc != SQLITE_OK) { cerr << "ERROR: Failed to execute select_all_query: " << rc << "; " << err_msg << endl; sqlite3_free(err_msg); } + + if (!session_list.empty()) { + return unique_ptr(new 
json(session_list.front())); + } + return nullptr; } // --- DELETE -void Storage::delete_all_event_rows() { +void SqliteStorage::delete_all_event_rows() { lock_guard guard(this->m_db_access); int rc; @@ -310,7 +289,7 @@ void Storage::delete_all_event_rows() { } } -void Storage::delete_event_row_ids(list *id_list) { +void SqliteStorage::delete_event_rows_with_ids(const list &id_list) { lock_guard guard(this->m_db_access); int rc; @@ -327,7 +306,7 @@ void Storage::delete_event_row_ids(list *id_list) { } } -void Storage::delete_all_session_rows() { +void SqliteStorage::delete_session() { lock_guard guard(this->m_db_access); int rc; @@ -345,6 +324,6 @@ void Storage::delete_all_session_rows() { // --- Getters -string Storage::get_db_name() { +string SqliteStorage::get_db_name() { return this->m_db_name; } diff --git a/src/storage.hpp b/src/storage/sqlite_storage.hpp similarity index 51% rename from src/storage.hpp rename to src/storage/sqlite_storage.hpp index c2a5dc1..32cf3a8 100644 --- a/src/storage.hpp +++ b/src/storage/sqlite_storage.hpp @@ -11,19 +11,16 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ -#ifndef STORAGE_H -#define STORAGE_H +#ifndef SQLITE_STORAGE_H +#define SQLITE_STORAGE_H -#include +#include "event_store.hpp" +#include "session_store.hpp" #include -#include #include -#include #include -#include "utils.hpp" -#include "payload/payload.hpp" -#include "../include/sqlite3.h" -#include "../include/json.hpp" +#include "../../include/json.hpp" +#include "../../include/sqlite3.h" using std::mutex; using std::string; @@ -32,41 +29,32 @@ using json = nlohmann::json; namespace snowplow { /** - * @brief Tracker internal SQLite storage for events and session information. To be used internally within tracker only. - * + * @brief Tracker SQLite storage for events and session information. 
+ * */ -class Storage { -private: - static Storage *m_instance; - static mutex m_db_get; +class SqliteStorage : public EventStore, public SessionStore { +public: + SqliteStorage(const string &db_name); + ~SqliteStorage(); + + void add_event(const Payload &payload); + void get_all_event_rows(list *event_list); + void get_event_rows_batch(list *event_list, int number_to_get); + void delete_all_event_rows(); + void delete_event_rows_with_ids(const list &id_list); + + void set_session(const json &session_data); + unique_ptr get_session(); + void delete_session(); + + string get_db_name(); - Storage(const string & db_name); - ~Storage(); +private: string m_db_name; mutex m_db_access; sqlite3 *m_db; sqlite3_stmt *m_add_stmt; - -public: - static Storage *init(const string & db_name); - static Storage *instance(); - static void close(); - - struct EventRow { - int id; - Payload event; - }; - - void insert_payload(Payload payload); - void insert_update_session(json session_data); - void select_all_event_rows(list* event_list); - void select_event_row_range(list* event_list, int range); - void select_all_session_rows(list* session_list); - void delete_all_event_rows(); - void delete_event_row_ids(list* id_list); - void delete_all_session_rows(); - string get_db_name(); }; -} +} // namespace snowplow #endif diff --git a/src/utils.cpp b/src/utils.cpp index 470fc66..3c8214d 100644 --- a/src/utils.cpp +++ b/src/utils.cpp @@ -73,17 +73,17 @@ string Utils::get_uuid4() { #endif -string Utils::int_list_to_string(list *int_list, const string &delimiter) { +string Utils::int_list_to_string(const list &int_list, const string &delimiter) { stringstream s; - int i; - list::iterator it; + int i = 0; + int length = int_list.size(); - int length = int_list->size(); - for (i = 0, it = int_list->begin(); it != int_list->end(); ++it, ++i) { - s << *it; + for (auto const &value : int_list) { + s << value; if (i < length - 1) { s << delimiter; } + i++; } return s.str(); diff --git 
a/src/utils.hpp b/src/utils.hpp index ac122c6..56e3d76 100644 --- a/src/utils.hpp +++ b/src/utils.hpp @@ -54,11 +54,11 @@ namespace snowplow { class Utils { public: static string get_uuid4(); - static string int_list_to_string(list* int_list, const string & delimiter); + static string int_list_to_string(const list &int_list, const string &delimiter); static string map_to_query_string(map m); static string url_encode(string value); static string serialize_payload(Payload payload); - static Payload deserialize_json_str(const string & json_str); + static Payload deserialize_json_str(const string &json_str); static unsigned long long get_unix_epoch_ms(); static SelfDescribingJson get_desktop_context(); static string get_os_type(); @@ -68,9 +68,10 @@ class Utils { static string get_device_manufacturer(); static string get_device_model(); static int get_device_processor_count(); + private: static SelfDescribingJson *m_desktop_context; }; -} +} // namespace snowplow #endif diff --git a/test/client_session_test.cpp b/test/client_session_test.cpp index 45467ab..e2d7c97 100644 --- a/test/client_session_test.cpp +++ b/test/client_session_test.cpp @@ -12,7 +12,7 @@ See the Apache License Version 2.0 for the specific language governing permissio */ #include "../src/client_session.hpp" -#include "../src/storage.hpp" +#include "../src/storage/sqlite_storage.hpp" #include "catch.hpp" #include @@ -22,9 +22,10 @@ using std::this_thread::sleep_for; TEST_CASE("client_session") { SECTION("The Session doesn't change for subsequent tracked events") { - Storage::init("test1.db")->delete_all_session_rows(); + auto storage = std::make_shared("test1.db"); + storage->delete_session(); - ClientSession cs("test1.db", 500, 500); + ClientSession cs(storage, 500, 500); SelfDescribingJson session_json_1 = cs.update_and_get_session_context("event-id-1"); SelfDescribingJson session_json_2 = cs.update_and_get_session_context("event-id-2"); @@ -38,9 +39,10 @@ TEST_CASE("client_session") { } 
SECTION("The Session must persist and update in the background") { - Storage::init("test1.db")->delete_all_session_rows(); + auto storage = std::make_shared("test1.db"); + storage->delete_session(); - ClientSession cs("test1.db", 500, 500); + ClientSession cs(storage, 500, 500); REQUIRE(false == cs.get_is_background()); cs.set_is_background(true); @@ -71,15 +73,16 @@ TEST_CASE("client_session") { } SECTION("The Session must fetch information from previous sessions") { - Storage::init("test2.db")->delete_all_session_rows(); + auto storage = std::make_shared("test2.db"); + storage->delete_session(); - ClientSession cs("test2.db", 10000, 10000); + ClientSession cs(storage, 10000, 10000); SelfDescribingJson session_json = cs.update_and_get_session_context("event-id2"); json data = session_json.get()[SNOWPLOW_DATA]; REQUIRE(1 == data[SNOWPLOW_SESSION_INDEX].get()); - ClientSession cs1("test2.db", 500, 500); + ClientSession cs1(storage, 500, 500); SelfDescribingJson session_json1 = cs1.update_and_get_session_context("event-id2"); REQUIRE("iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-0-1" == session_json1.get()[SNOWPLOW_SCHEMA].get()); @@ -106,9 +109,10 @@ TEST_CASE("client_session") { } SECTION("If corrupted data makes it into the session database entry use defaults") { - Storage::init("test3.db")->insert_update_session("{}"_json); + auto storage = std::make_shared("test3.db"); + storage->set_session("{}"_json); - ClientSession cs("test3.db", 500, 500); + ClientSession cs(storage, 500, 500); SelfDescribingJson session_json = cs.update_and_get_session_context("event-id3"); REQUIRE("iglu:com.snowplowanalytics.snowplow/client_session/jsonschema/1-0-1" == session_json.get()[SNOWPLOW_SCHEMA].get()); @@ -135,9 +139,10 @@ TEST_CASE("client_session") { } SECTION("The Session updates using background timeout in background") { - Storage::init("test1.db")->delete_all_session_rows(); + auto storage = std::make_shared("test1.db"); + storage->delete_session(); - 
ClientSession cs("test1.db", 500, 1); + ClientSession cs(storage, 500, 1); cs.set_is_background(true); SelfDescribingJson session_json_1 = cs.update_and_get_session_context("event-id-1"); @@ -162,9 +167,10 @@ TEST_CASE("client_session") { } SECTION("The Session updates using background timeout after transition to foreground") { - Storage::init("test1.db")->delete_all_session_rows(); + auto storage = std::make_shared("test1.db"); + storage->delete_session(); - ClientSession cs("test1.db", 500, 1); + ClientSession cs(storage, 500, 1); SelfDescribingJson session_json_1 = cs.update_and_get_session_context("event-id-1"); cs.set_is_background(true); diff --git a/test/emitter_test.cpp b/test/emitter_test.cpp index 131d61e..b1aa073 100644 --- a/test/emitter_test.cpp +++ b/test/emitter_test.cpp @@ -13,6 +13,8 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../src/emitter.hpp" #include "../src/payload/event_payload.hpp" +#include "http/test_http_client.hpp" +#include "../src/storage/sqlite_storage.hpp" #include "catch.hpp" #include "http/test_http_client.hpp" @@ -35,6 +37,8 @@ string track_sample_event(Emitter &emitter) { } TEST_CASE("emitter") { + auto storage = std::make_shared("test-emitter.db"); + SECTION("Emitter rejects urls (starting with http:// or https://)") { bool inv_arg_http = false; bool inv_arg_https = false; @@ -42,25 +46,25 @@ TEST_CASE("emitter") { bool inv_arg_https_case = false; try { - Emitter emitter("http://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("http://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_http = true; } try { - Emitter emitter("https://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new 
TestHttpClient())); + Emitter emitter("https://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_https = true; } try { - Emitter emitter("HTTP://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("HTTP://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_http_case = true; } try { - Emitter emitter("HTTPS://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("HTTPS://com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_arg_https_case = true; } @@ -72,7 +76,7 @@ TEST_CASE("emitter") { } SECTION("Emitter setup confirmation") { - Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); REQUIRE(false == emitter.is_running()); REQUIRE("http://com.acme.collector/com.snowplowanalytics.snowplow/tp2" == emitter.get_cracked_url().to_string()); @@ -97,7 +101,7 @@ TEST_CASE("emitter") { emitter.flush(); REQUIRE(false == emitter.is_running()); - Emitter emitter_1("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter_1("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); REQUIRE(false == 
emitter_1.is_running()); REQUIRE("https://com.acme.collector/i" == emitter_1.get_cracked_url().to_string()); @@ -108,7 +112,7 @@ TEST_CASE("emitter") { bool inv_argument_empty_uri = false; try { - Emitter emitter_2("", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter_2("", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_argument_empty_uri = true; } @@ -116,7 +120,7 @@ TEST_CASE("emitter") { bool inv_argument_bad_url = false; try { - Emitter emitter_3("../:random../gibber", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter_3("../:random../gibber", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 51000, storage, unique_ptr(new TestHttpClient())); } catch (invalid_argument) { inv_argument_bad_url = true; } @@ -124,7 +128,7 @@ TEST_CASE("emitter") { } SECTION("Emitter should track and remove only successful events from the database for GET requests") { - Emitter emitter("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 52000, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("com.acme.collector", Emitter::Method::GET, Emitter::Protocol::HTTPS, 500, 52000, 52000, storage, unique_ptr(new TestHttpClient())); emitter.start(); Payload payload; @@ -138,8 +142,8 @@ TEST_CASE("emitter") { list requests = TestHttpClient::get_requests_list(); REQUIRE(0 != requests.size()); - list *event_list = new list; - Storage::instance()->select_all_event_rows(event_list); + list *event_list = new list; + storage->get_all_event_rows(event_list); REQUIRE(0 == event_list->size()); event_list->clear(); @@ -150,8 +154,8 @@ TEST_CASE("emitter") { emitter.add(payload); } - event_list = new list; - Storage::instance()->select_all_event_rows(event_list); + 
event_list = new list; + storage->get_all_event_rows(event_list); REQUIRE(10 == event_list->size()); event_list->clear(); @@ -161,7 +165,7 @@ TEST_CASE("emitter") { } SECTION("Emitter should track and remove only successful events from the database for POST requests") { - Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, storage, unique_ptr(new TestHttpClient())); emitter.start(); Payload payload; @@ -175,8 +179,8 @@ TEST_CASE("emitter") { list requests = TestHttpClient::get_requests_list(); REQUIRE(0 != requests.size()); - list *event_list = new list; - Storage::instance()->select_all_event_rows(event_list); + list *event_list = new list; + storage->get_all_event_rows(event_list); REQUIRE(0 == event_list->size()); event_list->clear(); @@ -188,8 +192,8 @@ TEST_CASE("emitter") { emitter.add(payload); } - event_list = new list; - Storage::instance()->select_all_event_rows(event_list); + event_list = new list; + storage->get_all_event_rows(event_list); REQUIRE(10 == event_list->size()); event_list->clear(); @@ -203,8 +207,8 @@ TEST_CASE("emitter") { emitter.start(); emitter.flush(); - event_list = new list; - Storage::instance()->select_all_event_rows(event_list); + event_list = new list; + storage->get_all_event_rows(event_list); REQUIRE(0 == event_list->size()); event_list->clear(); @@ -216,8 +220,8 @@ TEST_CASE("emitter") { emitter.start(); emitter.flush(); - event_list = new list; - Storage::instance()->select_all_event_rows(event_list); + event_list = new list; + storage->get_all_event_rows(event_list); REQUIRE(0 == event_list->size()); event_list->clear(); @@ -226,7 +230,7 @@ TEST_CASE("emitter") { } SECTION("triggers callback for all emit statuses") { - Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, 
"test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, storage, unique_ptr(new TestHttpClient())); vector, EmitStatus>> calls; emitter.set_request_callback( [&](list event_ids, EmitStatus status) { @@ -268,7 +272,7 @@ TEST_CASE("emitter") { } SECTION("doesn't trigger callbacks for not subscribed emit statuses") { - Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, storage, unique_ptr(new TestHttpClient())); vector, EmitStatus>> calls; emitter.set_request_callback( [&](list event_ids, EmitStatus status) { @@ -301,7 +305,7 @@ TEST_CASE("emitter") { } SECTION("Emitter should not retry failed events for no-retry status codes") { - Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, "test-emitter.db", unique_ptr(new TestHttpClient())); + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, storage, unique_ptr(new TestHttpClient())); TestHttpClient::set_http_response_code(200); // success, don't retry track_sample_event(emitter); diff --git a/test/storage_test.cpp b/test/storage/sqlite_storage_test.cpp similarity index 50% rename from test/storage_test.cpp rename to test/storage/sqlite_storage_test.cpp index 1e49e4a..a4800f5 100644 --- a/test/storage_test.cpp +++ b/test/storage/sqlite_storage_test.cpp @@ -11,56 +11,32 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
*/ -#include "../src/storage.hpp" -#include "catch.hpp" +#include "../../src/storage/sqlite_storage.hpp" +#include "../catch.hpp" using namespace snowplow; using std::runtime_error; -TEST_CASE("storage") { - Storage::close(); - Storage *storage = Storage::init("test1.db"); - REQUIRE("test1.db" == storage->get_db_name()); - storage->delete_all_event_rows(); - storage->delete_all_session_rows(); - - SECTION("singleton controls should work as expected") { - Storage::close(); - - bool runtime_error_not_init = false; - try { - Storage::instance(); - } catch (runtime_error) { - runtime_error_not_init = true; - } - REQUIRE(runtime_error_not_init == true); - - Storage::init("test1.db"); - - runtime_error_not_init = false; - try { - Storage::instance(); - } catch (runtime_error) { - runtime_error_not_init = true; - } - REQUIRE(runtime_error_not_init == false); +TEST_CASE("SQLite storage") { + SECTION("database name reflects initialization") { + SqliteStorage storage("test1.db"); + REQUIRE("test1.db" == storage.get_db_name()); } SECTION("database should throw exceptions for unmanageable errors") { - Storage::close(); - bool runtime_error_bad_db_name = false; try { - Storage::init("~/"); + SqliteStorage("~/"); } catch (runtime_error) { runtime_error_bad_db_name = true; } REQUIRE(runtime_error_bad_db_name == true); - Storage::init("test1.db"); + SqliteStorage("test1.db"); } SECTION("should be able to insert,select and delete Payload objects to and from the database") { + SqliteStorage storage("test1.db"); Payload p; p.add("e", "pv"); p.add("p", "srv"); @@ -68,87 +44,81 @@ TEST_CASE("storage") { // INSERT 50 rows for (int i = 0; i < 50; i++) { - storage->insert_payload(p); + storage.add_event(p); } // SELECT one row - list *event_list = new list; - storage->select_all_event_rows(event_list); + list *event_list = new list; + storage.get_all_event_rows(event_list); REQUIRE(50 == event_list->size()); - for (list::iterator it = event_list->begin(); it != event_list->end(); ++it) { 
+ for (list::iterator it = event_list->begin(); it != event_list->end(); ++it) { REQUIRE("pv" == it->event.get()["e"]); REQUIRE("srv" == it->event.get()["p"]); REQUIRE("cpp-0.1.0" == it->event.get()["tv"]); } event_list->clear(); - storage->select_event_row_range(event_list, 100); + storage.get_event_rows_batch(event_list, 100); REQUIRE(50 == event_list->size()); event_list->clear(); - storage->select_event_row_range(event_list, 5); + storage.get_event_rows_batch(event_list, 5); REQUIRE(5 == event_list->size()); // DELETE rows by id - list *id_list = new list; - for (list::iterator it = event_list->begin(); it != event_list->end(); ++it) { - id_list->push_back(it->id); + list id_list; + for (list::iterator it = event_list->begin(); it != event_list->end(); ++it) { + id_list.push_back(it->id); } - storage->delete_event_row_ids(id_list); + storage.delete_event_rows_with_ids(id_list); event_list->clear(); - id_list->clear(); - delete (id_list); - storage->select_event_row_range(event_list, 100); + storage.get_event_rows_batch(event_list, 100); REQUIRE(45 == event_list->size()); event_list->clear(); // DELETE all rows - storage->delete_all_event_rows(); - storage->select_all_event_rows(event_list); + storage.delete_all_event_rows(); + storage.get_all_event_rows(event_list); REQUIRE(0 == event_list->size()); event_list->clear(); // Delete memory for list delete (event_list); - storage->delete_all_event_rows(); - Storage::close(); + storage.delete_all_event_rows(); } SECTION("should be able to insert only one session object into the database") { - list *session_rows = new list; + SqliteStorage storage("test1.db"); // Insert and check row json j = "{\"storage\":\"SQLITE\",\"previousSessionId\":null}"_json; - storage->insert_update_session(j); - storage->select_all_session_rows(session_rows); + storage.set_session(j); + auto session = storage.get_session(); - REQUIRE(1 == session_rows->size()); - REQUIRE("{\"previousSessionId\":null,\"storage\":\"SQLITE\"}" == 
session_rows->front().dump()); - session_rows->clear(); + REQUIRE(session); + REQUIRE("{\"previousSessionId\":null,\"storage\":\"SQLITE\"}" == session->dump()); // Check we can only insert one row for (int i = 0; i < 50; i++) { - storage->insert_update_session(j); + storage.set_session(j); } - storage->select_all_session_rows(session_rows); + session = storage.get_session(); - REQUIRE(1 == session_rows->size()); - REQUIRE("{\"previousSessionId\":null,\"storage\":\"SQLITE\"}" == session_rows->front().dump()); - session_rows->clear(); + REQUIRE(session); + REQUIRE("{\"previousSessionId\":null,\"storage\":\"SQLITE\"}" == session->dump()); // Check we can update the row values j = "{\"storage\":\"SQLITE\",\"previousSessionId\":\"a_value\"}"_json; - storage->insert_update_session(j); - storage->select_all_session_rows(session_rows); + storage.set_session(j); + session = storage.get_session(); - REQUIRE(1 == session_rows->size()); - REQUIRE("{\"previousSessionId\":\"a_value\",\"storage\":\"SQLITE\"}" == session_rows->front().dump()); - session_rows->clear(); + REQUIRE(session); + REQUIRE("{\"previousSessionId\":\"a_value\",\"storage\":\"SQLITE\"}" == session->dump()); - // Delete memory for list - delete (session_rows); - storage->delete_all_session_rows(); - Storage::close(); + // Delete session and check that it is deleted + storage.delete_session(); + session = storage.get_session(); + REQUIRE(!session); } } diff --git a/test/tracker_test.cpp b/test/tracker_test.cpp index 90e8d07..6f03c6a 100644 --- a/test/tracker_test.cpp +++ b/test/tracker_test.cpp @@ -19,6 +19,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../src/events/screen_view_event.hpp" #include "../src/events/self_describing_event.hpp" #include "../src/events/timing_event.hpp" +#include "../src/storage/sqlite_storage.hpp" #include "http/test_http_client.hpp" #include "catch.hpp" @@ -28,6 +29,7 @@ using std::runtime_error; using std::to_string; 
TEST_CASE("tracker") { + auto storage = std::make_shared("test-tracker.db"); // --- Emitter Mock @@ -37,7 +39,7 @@ TEST_CASE("tracker") { vector m_payloads; public: - MockEmitter() : Emitter("com.acme", Emitter::Method::POST, Emitter::Protocol::HTTP, 0, 0, 0, "test-tracker.db", unique_ptr(new TestHttpClient())) {} + MockEmitter(std::shared_ptr event_store) : Emitter("com.acme", Emitter::Method::POST, Emitter::Protocol::HTTP, 0, 0, 0, move(event_store), unique_ptr(new TestHttpClient())) {} void start() { m_started = true; } void stop() { m_started = false; } void add(Payload payload) { m_payloads.push_back(payload); } @@ -47,7 +49,7 @@ TEST_CASE("tracker") { }; SECTION("Mock emitter stores payloads") { - MockEmitter e; + MockEmitter e(storage); e.start(); REQUIRE(e.is_started() == true); @@ -68,7 +70,7 @@ TEST_CASE("tracker") { } REQUIRE(runtime_exception_on_not_init == true); - MockEmitter e; + MockEmitter e(storage); Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); runtime_exception_on_not_init = false; @@ -83,8 +85,8 @@ TEST_CASE("tracker") { } SECTION("Tracker returns unique event ID") { - MockEmitter e; - ClientSession cs("test-tracker.db", 5000, 5000); + MockEmitter e(storage); + ClientSession cs(storage, 5000, 5000); string platform = "pc"; string app_id = "snowplow-test-suite"; string name_space = "snowplow-testing"; @@ -105,8 +107,8 @@ TEST_CASE("tracker") { } SECTION("Tracker controls should provide expected behaviour") { - MockEmitter e; - ClientSession cs("test-tracker.db", 5000, 5000); + MockEmitter e(storage); + ClientSession cs(storage, 5000, 5000); string platform = "pc"; string app_id = "snowplow-test-suite"; string name_space = "snowplow-testing"; @@ -174,7 +176,7 @@ TEST_CASE("tracker") { // --- Tracker Defaults SECTION("Tracker adds default fields to each payload") { - MockEmitter e; + MockEmitter e(storage); Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); REQUIRE(e.is_started() == true); @@ -194,7 
+196,7 @@ TEST_CASE("tracker") { } SECTION("Tracker can change default fields") { - MockEmitter e; + MockEmitter e(storage); string plat = "mob"; string app_id = "app-id"; @@ -265,7 +267,7 @@ TEST_CASE("tracker") { bool is_arg_exception_empty_category; bool is_arg_exception_empty_action; - MockEmitter e; + MockEmitter e(storage); Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); StructuredEvent sv("", "hello"); @@ -340,7 +342,7 @@ TEST_CASE("tracker") { } SECTION("track ScreenViewEvent generates sane event") { - MockEmitter e; + MockEmitter e(storage); Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); ScreenViewEvent se; @@ -405,7 +407,7 @@ TEST_CASE("tracker") { } SECTION("track TimingEvent generates a sane event") { - MockEmitter e; + MockEmitter e(storage); Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); TimingEvent te("category", "variable", 123); @@ -477,7 +479,7 @@ TEST_CASE("tracker") { } SECTION("track SelfDescribingEvent generates a sane event") { - MockEmitter e; + MockEmitter e(storage); bool desktop_context = false; Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); diff --git a/test/utils_test.cpp b/test/utils_test.cpp index 3ba185d..2a4162a 100644 --- a/test/utils_test.cpp +++ b/test/utils_test.cpp @@ -40,17 +40,14 @@ TEST_CASE("utils") { } SECTION("int_list_to_string will successfully convert a list of integers to a string") { - list *int_list = new list; - int_list->push_back(1); - int_list->push_back(2); - int_list->push_back(3); - int_list->push_back(4); - int_list->push_back(5); + list int_list; + int_list.push_back(1); + int_list.push_back(2); + int_list.push_back(3); + int_list.push_back(4); + int_list.push_back(5); REQUIRE("1,2,3,4,5" == Utils::int_list_to_string(int_list, ",")); - - int_list->clear(); - delete (int_list); } SECTION("map_to_query_string should correctly convert a map to a query string") { From 2450733dd0495a0f5763e12b91ed775997cfc0a4 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matu=CC=81s=CC=8C=20Tomlein?= Date: Wed, 20 Apr 2022 15:18:02 +0200 Subject: [PATCH 10/35] Prepare for 0.3.0 release --- CHANGELOG | 12 ++++++++++++ README.md | 2 +- src/constants.hpp | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index feef082..052fb33 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,15 @@ +Version 0.3.0 (2022-04-19) +-------------------------- +Add support for Linux in HTTP client (#5) +Add CI build on Linux and update build instructions (#50) +Add support for Linux in desktop context (#47) +Add support for Linux when generating UUIDs (#46) +Use a common Event base class for all event types (#51) +Add emitter callback function (#7) +Add customizable no-retry HTTP status codes (#54) +Expose an interface for storage and make it configurable (#48) +Add a header file that includes all the published APIs (#55) + Version 0.2.0 (2022-02-24) -------------------------- Update session when events are tracked (#35) diff --git a/README.md b/README.md index 159c994..746caa9 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,7 @@ limitations under the License. 
[travis-image]: https://travis-ci.org/snowplow/snowplow-cpp-tracker.png?branch=master [travis]: https://travis-ci.org/snowplow/snowplow-cpp-tracker -[release-image]: https://img.shields.io/badge/release-0.2.0-6ad7e5.svg?style=flat +[release-image]: https://img.shields.io/badge/release-0.3.0-6ad7e5.svg?style=flat [releases]: https://github.com/snowplow/snowplow-cpp-tracker/releases [license-image]: https://img.shields.io/badge/license-Apache--2-blue.svg?style=flat diff --git a/src/constants.hpp b/src/constants.hpp index b45a723..45df31e 100644 --- a/src/constants.hpp +++ b/src/constants.hpp @@ -21,7 +21,7 @@ using std::string; using std::set; namespace snowplow { -const string SNOWPLOW_TRACKER_VERSION_LABEL = "cpp-0.2.0"; +const string SNOWPLOW_TRACKER_VERSION_LABEL = "cpp-0.3.0"; // post requests const string SNOWPLOW_POST_PROTOCOL_VENDOR = "com.snowplowanalytics.snowplow"; From f2514a3150d8d3bbb8b5b2488f10814ad282a78a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Wed, 27 Apr 2022 10:54:57 +0200 Subject: [PATCH 11/35] Rename send_limit to batch_size in Emitter (close #63) PR #69 --- src/emitter.cpp | 10 +++++----- src/emitter.hpp | 14 +++++++------- test/emitter_test.cpp | 4 ++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/emitter.cpp b/src/emitter.cpp index af8f883..409f8e4 100644 --- a/src/emitter.cpp +++ b/src/emitter.cpp @@ -47,11 +47,11 @@ unique_ptr createDefaultHttpClient() { } #endif -Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_limit, - int byte_limit_post, int byte_limit_get, shared_ptr event_store) : Emitter(uri, method, protocol, send_limit, byte_limit_post, byte_limit_get, std::move(event_store), createDefaultHttpClient()) { +Emitter::Emitter(const string &uri, Method method, Protocol protocol, int batch_size, + int byte_limit_post, int byte_limit_get, shared_ptr event_store) : Emitter(uri, method, protocol, batch_size, byte_limit_post, byte_limit_get, 
std::move(event_store), createDefaultHttpClient()) { } -Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_limit, +Emitter::Emitter(const string &uri, Method method, Protocol protocol, int batch_size, int byte_limit_post, int byte_limit_get, shared_ptr event_store, unique_ptr http_client) : m_url(this->get_collector_url(uri, protocol, method)) { if (uri == "") { @@ -74,7 +74,7 @@ Emitter::Emitter(const string &uri, Method method, Protocol protocol, int send_l this->m_running = false; this->m_method = method; - this->m_send_limit = send_limit; + this->m_batch_size = batch_size; this->m_byte_limit_post = byte_limit_post; this->m_byte_limit_get = byte_limit_get; this->m_event_store = move(event_store); @@ -133,7 +133,7 @@ void Emitter::flush() { void Emitter::run() { do { list event_rows; - m_event_store->get_event_rows_batch(&event_rows, m_send_limit); + m_event_store->get_event_rows_batch(&event_rows, m_batch_size); if (event_rows.size() > 0) { // emit the events diff --git a/src/emitter.hpp b/src/emitter.hpp index b0161c5..e0ec112 100644 --- a/src/emitter.hpp +++ b/src/emitter.hpp @@ -91,12 +91,12 @@ class Emitter { * @param uri The URI to send events to * @param method The request type to use (GET or POST) * @param protocol The protocol to use (http or https) - * @param send_limit The maximum amount of events to send at a time + * @param batch_size The maximum amount of events to send at a time * @param byte_limit_post The byte limit when sending a POST request * @param byte_limit_get The byte limit when sending a GET request * @param event_store Defines the database to use for event queue */ - Emitter(const string & uri, Method method, Protocol protocol, int send_limit, + Emitter(const string & uri, Method method, Protocol protocol, int batch_size, int byte_limit_post, int byte_limit_get, shared_ptr event_store); /** @@ -105,13 +105,13 @@ class Emitter { * @param uri The URI to send events to * @param method The request type to use 
(GET or POST) * @param protocol The protocol to use (http or https) - * @param send_limit The maximum amount of events to send at a time + * @param batch_size The maximum amount of events to send at a time * @param byte_limit_post The byte limit when sending a POST request * @param byte_limit_get The byte limit when sending a GET request * @param event_store Defines the database to use for event queue * @param http_client Unique pointer to a custom HTTP client to send GET and POST requests with */ - Emitter(const string & uri, Method method, Protocol protocol, int send_limit, + Emitter(const string & uri, Method method, Protocol protocol, int batch_size, int byte_limit_post, int byte_limit_get, shared_ptr event_store, unique_ptr http_client); ~Emitter(); @@ -152,11 +152,11 @@ class Emitter { Method get_method() const { return m_method; } /** - * @brief Get the send limit. + * @brief Get the batch size. * * @return unsigned int The maximum amount of events to send at a time */ - unsigned int get_send_limit() const { return m_send_limit; } + unsigned int get_batch_size() const { return m_batch_size; } /** * @brief Get the byte limit for GET. 
@@ -208,7 +208,7 @@ class Emitter { Method m_method; shared_ptr m_event_store; unique_ptr m_http_client; - unsigned int m_send_limit; + unsigned int m_batch_size; unsigned int m_byte_limit_get; unsigned int m_byte_limit_post; diff --git a/test/emitter_test.cpp b/test/emitter_test.cpp index b1aa073..36aef67 100644 --- a/test/emitter_test.cpp +++ b/test/emitter_test.cpp @@ -81,7 +81,7 @@ TEST_CASE("emitter") { REQUIRE(false == emitter.is_running()); REQUIRE("http://com.acme.collector/com.snowplowanalytics.snowplow/tp2" == emitter.get_cracked_url().to_string()); REQUIRE(Emitter::Method::POST == emitter.get_method()); - REQUIRE(500 == emitter.get_send_limit()); + REQUIRE(500 == emitter.get_batch_size()); REQUIRE(52000 == emitter.get_byte_limit_post()); REQUIRE(51000 == emitter.get_byte_limit_get()); @@ -106,7 +106,7 @@ TEST_CASE("emitter") { REQUIRE(false == emitter_1.is_running()); REQUIRE("https://com.acme.collector/i" == emitter_1.get_cracked_url().to_string()); REQUIRE(Emitter::Method::GET == emitter_1.get_method()); - REQUIRE(500 == emitter_1.get_send_limit()); + REQUIRE(500 == emitter_1.get_batch_size()); REQUIRE(52000 == emitter_1.get_byte_limit_post()); REQUIRE(51000 == emitter_1.get_byte_limit_get()); From f5ef16e549dbaa371699f67f39a0a672812bfa89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Wed, 27 Apr 2022 10:55:24 +0200 Subject: [PATCH 12/35] Add event-level Subject (close #61) PR #71 --- src/events/event.cpp | 9 ++++++++ src/events/event.hpp | 14 +++++++++++- src/tracker.cpp | 6 +++++ test/tracker_test.cpp | 52 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/src/events/event.cpp b/src/events/event.cpp index c917e25..e916e4a 100644 --- a/src/events/event.cpp +++ b/src/events/event.cpp @@ -17,6 +17,7 @@ See the Apache License Version 2.0 for the specific language governing permissio using namespace snowplow; using std::to_string; using std::invalid_argument; +using 
std::move; Event::Event() { this->m_true_timestamp = NULL; @@ -57,6 +58,10 @@ unsigned long long *Event::get_true_timestamp() const { return m_true_timestamp; } +shared_ptr Event::get_subject() const { + return m_subject; +} + // --- Setters void Event::set_true_timestamp(unsigned long long *true_timestamp) { @@ -66,3 +71,7 @@ void Event::set_true_timestamp(unsigned long long *true_timestamp) { void Event::set_context(const vector &context) { m_context = context; } + +void Event::set_subject(shared_ptr subject) { + m_subject = move(subject); +} diff --git a/src/events/event.hpp b/src/events/event.hpp index 93e108c..5cb259b 100644 --- a/src/events/event.hpp +++ b/src/events/event.hpp @@ -16,11 +16,13 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../payload/self_describing_json.hpp" #include "../payload/event_payload.hpp" +#include "../subject.hpp" #include #include using std::string; using std::vector; +using std::shared_ptr; namespace snowplow { /** @@ -61,6 +63,13 @@ class Event { */ void set_true_timestamp(unsigned long long *true_timestamp); + /** + * @brief Set the optional subject object to supply additional information to the event. + * + * @param subject Shared pointer to Subject instance + */ + void set_subject(shared_ptr subject); + protected: /** * @brief This function is overriden by concrete event classes and returns payload with properties for the event types. 
@@ -80,9 +89,12 @@ class Event { EventPayload get_self_describing_event_payload(const SelfDescribingJson &event, bool use_base64) const; private: + EventPayload get_payload(bool use_base64) const; + shared_ptr get_subject() const; + unsigned long long *m_true_timestamp; vector m_context; - EventPayload get_payload(bool use_base64) const; + shared_ptr m_subject; friend class Tracker; }; diff --git a/src/tracker.cpp b/src/tracker.cpp index af9e160..8d3feea 100644 --- a/src/tracker.cpp +++ b/src/tracker.cpp @@ -95,6 +95,7 @@ void Tracker::set_subject(Subject *subject) { string Tracker::track(const Event &event) { EventPayload payload = event.get_payload(m_use_base64); vector context = event.get_context(); + auto event_subject = event.get_subject(); // Add standard KV Pairs payload.add(SNOWPLOW_TRACKER_VERSION, SNOWPLOW_TRACKER_VERSION_LABEL); @@ -107,6 +108,11 @@ string Tracker::track(const Event &event) { payload.add_map(this->m_subject->get_map()); } + // Add event subject pairs + if (event_subject) { + payload.add_map(event_subject->get_map()); + } + // Add Client Session if available if (this->m_client_session) { context.push_back(this->m_client_session->update_and_get_session_context(payload.get_event_id())); diff --git a/test/tracker_test.cpp b/test/tracker_test.cpp index 6f03c6a..fbf07ee 100644 --- a/test/tracker_test.cpp +++ b/test/tracker_test.cpp @@ -510,4 +510,56 @@ TEST_CASE("tracker") { Tracker::close(); } + + SECTION("adds payload from event Subject instance") { + MockEmitter e(storage); + + bool desktop_context = false; + Tracker *t = Tracker::init(e, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + + SelfDescribingJson sdj("schema", "{ \"hello\":\"world\" }"_json); + SelfDescribingEvent sde(sdj); + + shared_ptr subject = std::make_shared(); + subject->set_user_id("the_user"); + sde.set_subject(subject); + t->track(sde); + + REQUIRE(e.get_added_payloads().size() == 1); + + auto payload = e.get_added_payloads()[0].get(); + + REQUIRE(payload[SNOWPLOW_UID] == 
"the_user"); + + Tracker::close(); + } + + SECTION("event-level subject overries tracker subject properties") { + MockEmitter e(storage); + + bool desktop_context = false; + Subject trackerSubject; + trackerSubject.set_user_id("u1"); + trackerSubject.set_language("en"); + Tracker *t = Tracker::init(e, &trackerSubject, NULL, NULL, NULL, NULL, NULL, NULL); + + SelfDescribingJson sdj("schema", "{ \"hello\":\"world\" }"_json); + SelfDescribingEvent sde(sdj); + + shared_ptr eventSubject = std::make_shared(); + eventSubject->set_user_id("u2"); + eventSubject->set_timezone("GMT"); + sde.set_subject(eventSubject); + t->track(sde); + + REQUIRE(e.get_added_payloads().size() == 1); + + auto payload = e.get_added_payloads()[0].get(); + + REQUIRE(payload[SNOWPLOW_UID] == "u2"); + REQUIRE(payload[SNOWPLOW_LANGUAGE] == "en"); + REQUIRE(payload[SNOWPLOW_TIMEZONE] == "GMT"); + + Tracker::close(); + } } From 97b93040c8509143e039ec6ca85669fc634956e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Wed, 27 Apr 2022 10:56:19 +0200 Subject: [PATCH 13/35] Add IP address property to Subject (close #62) PR #70 --- src/constants.hpp | 1 + src/subject.cpp | 4 ++++ src/subject.hpp | 37 ++++++++++++++++++++++--------------- test/subject_test.cpp | 5 +++++ 4 files changed, 32 insertions(+), 15 deletions(-) diff --git a/src/constants.hpp b/src/constants.hpp index 45df31e..a9912cd 100644 --- a/src/constants.hpp +++ b/src/constants.hpp @@ -70,6 +70,7 @@ const string SNOWPLOW_COLOR_DEPTH = "cd"; const string SNOWPLOW_TIMEZONE = "tz"; const string SNOWPLOW_LANGUAGE = "lang"; const string SNOWPLOW_USERAGENT = "ua"; +const string SNOWPLOW_IP_ADDRESS = "ip"; // structured event const string SNOWPLOW_SE_CATEGORY = "se_ca"; diff --git a/src/subject.cpp b/src/subject.cpp index 864de04..b3d5e72 100644 --- a/src/subject.cpp +++ b/src/subject.cpp @@ -45,6 +45,10 @@ void Subject::set_useragent(const string &useragent) { this->m_payload.add(SNOWPLOW_USERAGENT, useragent); } +void 
Subject::set_ip_address(const string &ip_address) { + this->m_payload.add(SNOWPLOW_IP_ADDRESS, ip_address); +} + map Subject::get_map() { return this->m_payload.get(); } diff --git a/src/subject.hpp b/src/subject.hpp index c7a61c9..fed3266 100644 --- a/src/subject.hpp +++ b/src/subject.hpp @@ -14,10 +14,10 @@ See the Apache License Version 2.0 for the specific language governing permissio #ifndef SUBJECT_H #define SUBJECT_H +#include "constants.hpp" +#include "payload/payload.hpp" #include #include -#include "payload/payload.hpp" -#include "constants.hpp" using std::map; using std::string; @@ -33,14 +33,14 @@ class Subject { public: /** * @brief Set the business user ID string - * + * * @param user_id Business user ID */ - void set_user_id(const string & user_id); + void set_user_id(const string &user_id); /** * @brief Set the device screen resolution - * + * * @param width Device screen resolution width * @param height Device screen resolution height */ @@ -48,7 +48,7 @@ class Subject { /** * @brief Set the device viewport dimensions - * + * * @param width Device viewport width * @param height Device viewport height */ @@ -56,39 +56,46 @@ class Subject { /** * @brief Set the bit depth of the device’s color palette for displaying images - * + * * @param depth Device color depth */ void set_color_depth(int depth); /** * @brief Set the user’s timezone. 
- * + * @param timezone User's timezone (e.g., "Europe/London") */ - void set_timezone(const string & timezone); + void set_timezone(const string &timezone); /** * @brief Set the user's language - * + * * @param language User's language (e.g., "en") */ - void set_language(const string & language); + void set_language(const string &language); /** * @brief Set the useragent string for the event - * + * * @param user_agent Standard formatted useragent string (e.g., "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4)...") */ - void set_useragent(const string & user_agent); + void set_useragent(const string &user_agent); + + /** + * @brief Set the user's IP address + * + * @param ip_address IP address as string + */ + void set_ip_address(const string &ip_address); /** * @brief Get the subject properties as a map of strings - * + * * @return map Subject properties to be added to events */ map get_map(); }; -} +} // namespace snowplow #endif diff --git a/test/subject_test.cpp b/test/subject_test.cpp index af2133e..3e210ce 100644 --- a/test/subject_test.cpp +++ b/test/subject_test.cpp @@ -55,4 +55,9 @@ TEST_CASE("subject") { sub.set_useragent(useragent); REQUIRE(sub.get_map()["ua"] == useragent); } + + SECTION("set_ip_address adds a value for key ip") { + sub.set_ip_address("192.168.0.1"); + REQUIRE(sub.get_map()["ip"] == "192.168.0.1"); + } } From 62a30da04a7a243f4f33a98380631885425c3ab2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Wed, 27 Apr 2022 11:24:44 +0200 Subject: [PATCH 14/35] Add back-off on 100% event sending failure (close #9) PR #68 --- README.md | 2 +- snowplow-cpp-tracker-example.vcxproj | 6 +- snowplow-cpp-tracker-example.vcxproj.filters | 10 ++- snowplow-cpp-tracker.vcxproj | 9 ++- snowplow-cpp-tracker.vcxproj.filters | 15 ++++- src/{ => emitter}/emitter.cpp | 20 +++++- src/{ => emitter}/emitter.hpp | 18 +++--- src/emitter/retry_delay.cpp | 58 ++++++++++++++++++ src/emitter/retry_delay.hpp | 58 ++++++++++++++++++ 
src/snowplow.hpp | 4 +- src/tracker.hpp | 2 +- test/{ => emitter}/emitter_test.cpp | 27 +++++++-- test/emitter/retry_delay_test.cpp | 64 ++++++++++++++++++++ test/http/test_http_client.cpp | 8 ++- test/http/test_http_client.hpp | 3 +- test/tracker_test.cpp | 2 +- 16 files changed, 271 insertions(+), 35 deletions(-) rename src/{ => emitter}/emitter.cpp (95%) rename src/{ => emitter}/emitter.hpp (95%) create mode 100644 src/emitter/retry_delay.cpp create mode 100644 src/emitter/retry_delay.hpp rename test/{ => emitter}/emitter_test.cpp (93%) create mode 100644 test/emitter/retry_delay_test.cpp diff --git a/README.md b/README.md index 746caa9..ad570b7 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ This will create two executables - the first is the test-suite which can be exec The other is an example program which will send one of every type of event to an endpoint of your choosing like so: ```bash - host> cd build + host> cd build/example host> ./tracker_example {{ your collector uri }} ``` diff --git a/snowplow-cpp-tracker-example.vcxproj b/snowplow-cpp-tracker-example.vcxproj index 19c5b8a..b5565a3 100644 --- a/snowplow-cpp-tracker-example.vcxproj +++ b/snowplow-cpp-tracker-example.vcxproj @@ -102,7 +102,8 @@ - + + @@ -125,7 +126,8 @@ - + + diff --git a/snowplow-cpp-tracker-example.vcxproj.filters b/snowplow-cpp-tracker-example.vcxproj.filters index cfe6dc9..4a11947 100644 --- a/snowplow-cpp-tracker-example.vcxproj.filters +++ b/snowplow-cpp-tracker-example.vcxproj.filters @@ -21,7 +21,10 @@ Source Files - + + Source Files + + Source Files @@ -86,7 +89,10 @@ Header Files - + + Header Files + + Header Files diff --git a/snowplow-cpp-tracker.vcxproj b/snowplow-cpp-tracker.vcxproj index 37a7d3c..57e913d 100644 --- a/snowplow-cpp-tracker.vcxproj +++ b/snowplow-cpp-tracker.vcxproj @@ -106,7 +106,8 @@ - + + @@ -124,7 +125,8 @@ - + + @@ -143,7 +145,8 @@ - + + diff --git a/snowplow-cpp-tracker.vcxproj.filters b/snowplow-cpp-tracker.vcxproj.filters index 
6464b58..e89f930 100644 --- a/snowplow-cpp-tracker.vcxproj.filters +++ b/snowplow-cpp-tracker.vcxproj.filters @@ -21,7 +21,10 @@ Source Files - + + Source Files + + Source Files @@ -81,7 +84,10 @@ Source Files - + + Source Files + + Source Files @@ -125,7 +131,10 @@ Header Files - + + Header Files + + Header Files diff --git a/src/emitter.cpp b/src/emitter/emitter.cpp similarity index 95% rename from src/emitter.cpp rename to src/emitter/emitter.cpp index 409f8e4..c2a3172 100644 --- a/src/emitter.cpp +++ b/src/emitter/emitter.cpp @@ -26,22 +26,23 @@ using std::transform; using std::equal; using std::move; using std::future; +using std::this_thread::sleep_for; const int post_wrapper_bytes = 88; // "schema":"iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-4","data":[] const int post_stm_bytes = 22; // "stm":"1443452851000" #if defined(__APPLE__) -#include "http/http_client_apple.hpp" +#include "../http/http_client_apple.hpp" unique_ptr createDefaultHttpClient() { return unique_ptr(new HttpClientApple()); } #elif defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__) -#include "http/http_client_windows.hpp" +#include "../http/http_client_windows.hpp" unique_ptr createDefaultHttpClient() { return unique_ptr(new HttpClientWindows()); } #else -#include "http/http_client_curl.hpp" +#include "../http/http_client_curl.hpp" unique_ptr createDefaultHttpClient() { return unique_ptr(new HttpClientCurl()); } @@ -163,6 +164,19 @@ void Emitter::run() { delete_row_ids.splice(delete_row_ids.end(), success_row_ids); delete_row_ids.splice(delete_row_ids.end(), failed_wont_retry_row_ids); m_event_store->delete_event_rows_with_ids(delete_row_ids); + + // update retry delay calculation based on whether the requests will be retried + if (!failed_will_retry_row_ids.empty()) { + m_retry_delay.will_retry_emit(); + } else { + m_retry_delay.wont_retry_emit(); + } + + // sleep for the retry delay if there is one + auto retry_delay = m_retry_delay.get(); + 
if (retry_delay.count() > 0) { + sleep_for(retry_delay); + } } else { m_check_fin.notify_all(); diff --git a/src/emitter.hpp b/src/emitter/emitter.hpp similarity index 95% rename from src/emitter.hpp rename to src/emitter/emitter.hpp index e0ec112..22e52d5 100644 --- a/src/emitter.hpp +++ b/src/emitter/emitter.hpp @@ -20,14 +20,15 @@ See the Apache License Version 2.0 for the specific language governing permissio #include #include #include -#include "constants.hpp" -#include "utils.hpp" -#include "storage/event_store.hpp" -#include "payload/payload.hpp" -#include "payload/self_describing_json.hpp" -#include "cracked_url.hpp" -#include "http/http_request_result.hpp" -#include "http/http_client.hpp" +#include "../constants.hpp" +#include "../utils.hpp" +#include "../storage/event_store.hpp" +#include "../payload/payload.hpp" +#include "../payload/self_describing_json.hpp" +#include "../cracked_url.hpp" +#include "../http/http_request_result.hpp" +#include "../http/http_client.hpp" +#include "retry_delay.hpp" using std::string; using std::thread; @@ -222,6 +223,7 @@ class Emitter { EmitterCallback m_callback; EmitStatus m_callback_emit_status; map m_custom_retry_for_status_codes; + RetryDelay m_retry_delay; void run(); void do_send(const list &event_rows, list *results); diff --git a/src/emitter/retry_delay.cpp b/src/emitter/retry_delay.cpp new file mode 100644 index 0000000..1b1dad5 --- /dev/null +++ b/src/emitter/retry_delay.cpp @@ -0,0 +1,58 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "retry_delay.hpp" +#include + +using namespace snowplow; +using std::min; + +RetryDelay::RetryDelay(double base, double factor, int retry_count_cap, double jitter) { + m_base = base; + m_factor = factor; + m_retry_count_cap = retry_count_cap; + m_jitter = jitter; + m_retry_count = 0; +} + +void RetryDelay::will_retry_emit() { + m_retry_count++; +} + +void RetryDelay::wont_retry_emit() { + m_retry_count = 0; +} + +milliseconds RetryDelay::get() const { + if (m_retry_count == 0) { + return milliseconds(0); + } + + double delay_ms = m_base * pow(m_factor, min(m_retry_count, m_retry_count_cap) - 1); + + if (m_jitter != 0) { + std::random_device rd; + std::mt19937 mt(rd()); + std::uniform_real_distribution dist(0.0, 1.0); + double seed = dist(mt); + double deviation = floor(seed * m_jitter * delay_ms); + + if (round(seed) == 1) { + delay_ms -= deviation; + } else { + delay_ms += deviation; + } + } + + return milliseconds((unsigned long) delay_ms); +} diff --git a/src/emitter/retry_delay.hpp b/src/emitter/retry_delay.hpp new file mode 100644 index 0000000..c1abd6d --- /dev/null +++ b/src/emitter/retry_delay.hpp @@ -0,0 +1,58 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#ifndef RETRY_DELAY_H +#define RETRY_DELAY_H + +#include + +using std::chrono::milliseconds; + +namespace snowplow { +/** + * @brief Calculates exponential retry delay for Emitter based on the number of retry attempts. + */ +class RetryDelay { +public: + /** + * @brief Construct a new RetryDelay object + * + * @param base Base delay in milliseconds + * @param factor Multiplicative factor for exponential function + * @param retry_count_cap Maximum retry count to consider in the retry delay calculation + * @param jitter Amount of randomness to introduce in the calculation (from 0 to 1) + */ + RetryDelay(double base = 100.0, double factor = 2.0, int retry_count_cap = 10, double jitter = 0.1); + + /** + * @brief Update retry delay considering that a new retry is planned. + */ + void will_retry_emit(); + + /** + * @brief Update retry delay considering that no more retries are planned. 
+ */ + void wont_retry_emit(); + + milliseconds get() const; + +private: + int m_retry_count; + double m_base; + double m_factor; + int m_retry_count_cap; + double m_jitter; +}; +} // namespace snowplow + +#endif diff --git a/src/snowplow.hpp b/src/snowplow.hpp index 95b0129..1ca3ea1 100644 --- a/src/snowplow.hpp +++ b/src/snowplow.hpp @@ -19,10 +19,12 @@ See the Apache License Version 2.0 for the specific language governing permissio */ #include "client_session.hpp" -#include "emitter.hpp" #include "subject.hpp" #include "tracker.hpp" +// emitter +#include "emitter/emitter.hpp" + // storage #include "storage/event_row.hpp" #include "storage/event_store.hpp" diff --git a/src/tracker.hpp b/src/tracker.hpp index 4e42dc5..8dad530 100644 --- a/src/tracker.hpp +++ b/src/tracker.hpp @@ -15,7 +15,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #define TRACKER_H #include -#include "emitter.hpp" +#include "emitter/emitter.hpp" #include "subject.hpp" #include "client_session.hpp" #include "events/event.hpp" diff --git a/test/emitter_test.cpp b/test/emitter/emitter_test.cpp similarity index 93% rename from test/emitter_test.cpp rename to test/emitter/emitter_test.cpp index 36aef67..72adee3 100644 --- a/test/emitter_test.cpp +++ b/test/emitter/emitter_test.cpp @@ -11,12 +11,12 @@ software distributed under the Apache License Version 2.0 is distributed on an See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
*/ -#include "../src/emitter.hpp" -#include "../src/payload/event_payload.hpp" -#include "http/test_http_client.hpp" -#include "../src/storage/sqlite_storage.hpp" -#include "catch.hpp" -#include "http/test_http_client.hpp" +#include "../../src/emitter/emitter.hpp" +#include "../../src/payload/event_payload.hpp" +#include "../http/test_http_client.hpp" +#include "../../src/storage/sqlite_storage.hpp" +#include "../catch.hpp" +#include "../http/test_http_client.hpp" using namespace snowplow; using std::invalid_argument; @@ -337,4 +337,19 @@ TEST_CASE("emitter") { emitter.stop(); } + + SECTION("Emitter sleeps in between retries") { + Emitter emitter("com.acme.collector", Emitter::Method::POST, Emitter::Protocol::HTTP, 500, 500, 500, storage, unique_ptr(new TestHttpClient())); + + TestHttpClient::set_temporary_response_code(501, 5); // retry with 5 failures + auto t_start = std::chrono::high_resolution_clock::now(); + track_sample_event(emitter); + auto t_end = std::chrono::high_resolution_clock::now(); + double elapsed_time_ms = std::chrono::duration(t_end - t_start).count(); + REQUIRE(6 == TestHttpClient::get_requests_list().size()); + REQUIRE(1000 < elapsed_time_ms); + TestHttpClient::reset(); + + emitter.stop(); + } } diff --git a/test/emitter/retry_delay_test.cpp b/test/emitter/retry_delay_test.cpp new file mode 100644 index 0000000..18a9b6a --- /dev/null +++ b/test/emitter/retry_delay_test.cpp @@ -0,0 +1,64 @@ +/* +Copyright (c) 2022 Snowplow Analytics Ltd. All rights reserved. + +This program is licensed to you under the Apache License Version 2.0, +and you may not use this file except in compliance with the Apache License Version 2.0. +You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
+ +Unless required by applicable law or agreed to in writing, +software distributed under the Apache License Version 2.0 is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. +*/ + +#include "../../src/emitter/retry_delay.hpp" +#include "../catch.hpp" +#include "../http/test_http_client.hpp" + +using namespace snowplow; +using std::chrono::milliseconds; + +TEST_CASE("RetryDelay") { + SECTION("retry delay is increasing as expected") { + RetryDelay retry_delay(100, 2, 10, 0); + REQUIRE(milliseconds(0) == retry_delay.get()); + retry_delay.will_retry_emit(); + REQUIRE(milliseconds(100) == retry_delay.get()); + retry_delay.will_retry_emit(); + REQUIRE(milliseconds(200) == retry_delay.get()); + retry_delay.will_retry_emit(); + REQUIRE(milliseconds(400) == retry_delay.get()); + retry_delay.will_retry_emit(); + REQUIRE(milliseconds(800) == retry_delay.get()); + } + + SECTION("retry delay stops increasing when retry count cap is reached") { + RetryDelay retry_delay(100, 2, 1, 0); + retry_delay.will_retry_emit(); + REQUIRE(milliseconds(100) == retry_delay.get()); + retry_delay.will_retry_emit(); + REQUIRE(milliseconds(100) == retry_delay.get()); + } + + SECTION("retry delay adds randomness") { + RetryDelay retry_delay(100, 2, 11, 0.1); + for (int i = 0; i < 11; i++) { + retry_delay.will_retry_emit(); + } + REQUIRE(retry_delay.get() != retry_delay.get()); + int expected = 102400; + REQUIRE(milliseconds(expected - expected / 10).count() < retry_delay.get().count()); + REQUIRE(milliseconds(expected + expected / 10).count() > retry_delay.get().count()); + } + + SECTION("retry delay resets if won't retry") { + RetryDelay retry_delay(100, 2, 10, 0); + auto delay0 = retry_delay.get(); + retry_delay.will_retry_emit(); + auto delay1 = retry_delay.get(); + retry_delay.wont_retry_emit(); + auto delay2 = 
retry_delay.get(); + REQUIRE(delay1 > delay2); + REQUIRE(delay0 == delay2); + } +} diff --git a/test/http/test_http_client.cpp b/test/http/test_http_client.cpp index deb881e..589124d 100644 --- a/test/http/test_http_client.cpp +++ b/test/http/test_http_client.cpp @@ -24,6 +24,7 @@ list TestHttpClient::requests_list; mutex TestHttpClient::log_read_write; int TestHttpClient::response_code = 200; int TestHttpClient::temporary_response_code = -1; +int TestHttpClient::temporary_response_code_remaining_attempts = 0; HttpRequestResult TestHttpClient::http_request(const RequestMethod method, CrackedUrl url, const string &query_string, const string &post_data, list row_ids, bool oversize) { lock_guard guard(log_read_write); @@ -44,15 +45,16 @@ void TestHttpClient::set_http_response_code(int http_response_code) { response_code = http_response_code; } -void TestHttpClient::set_temporary_response_code(int http_response_code) { +void TestHttpClient::set_temporary_response_code(int http_response_code, int number_of_attempts) { lock_guard guard(log_read_write); temporary_response_code = http_response_code; + temporary_response_code_remaining_attempts = number_of_attempts; } int TestHttpClient::fetch_response_code() { - if (temporary_response_code >= 0) { + if (temporary_response_code_remaining_attempts > 0) { int code = temporary_response_code; - temporary_response_code = -1; + temporary_response_code_remaining_attempts--; return code; } return response_code; diff --git a/test/http/test_http_client.hpp b/test/http/test_http_client.hpp index 3ab54dd..fed7e0b 100644 --- a/test/http/test_http_client.hpp +++ b/test/http/test_http_client.hpp @@ -44,10 +44,11 @@ class TestHttpClient : public HttpClient { static list requests_list; static int response_code; static int temporary_response_code; + static int temporary_response_code_remaining_attempts; static mutex log_read_write; static void set_http_response_code(int http_response_code); - static void set_temporary_response_code(int 
http_response_code); + static void set_temporary_response_code(int http_response_code, int number_of_attempts = 1); static list get_requests_list(); static void reset(); diff --git a/test/tracker_test.cpp b/test/tracker_test.cpp index fbf07ee..124e7dc 100644 --- a/test/tracker_test.cpp +++ b/test/tracker_test.cpp @@ -13,7 +13,7 @@ See the Apache License Version 2.0 for the specific language governing permissio #include "../include/base64.hpp" #include "../include/json.hpp" -#include "../src/emitter.hpp" +#include "../src/emitter/emitter.hpp" #include "../src/tracker.hpp" #include "../src/events/structured_event.hpp" #include "../src/events/screen_view_event.hpp" From 33febfa0b669d38e6737eb17ab54e4a0a6c57c84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matu=CC=81s=CC=8C=20Tomlein?= Date: Wed, 27 Apr 2022 11:28:25 +0200 Subject: [PATCH 15/35] Prepare for 0.4.0 release --- CHANGELOG | 7 +++++++ README.md | 2 +- src/constants.hpp | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index 052fb33..bc09c3e 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,10 @@ +Version 0.4.0 (2022-04-27) +-------------------------- +Add back-off on 100% event sending failure (#9) +Add event-level Subject (#61) +Add IP address property to Subject (#62) +Rename send_limit to batch_size in Emitter (#63) + Version 0.3.0 (2022-04-19) -------------------------- Add support for Linux in HTTP client (#5) diff --git a/README.md b/README.md index ad570b7..60a2c2c 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,7 @@ limitations under the License. 
[travis-image]: https://travis-ci.org/snowplow/snowplow-cpp-tracker.png?branch=master [travis]: https://travis-ci.org/snowplow/snowplow-cpp-tracker -[release-image]: https://img.shields.io/badge/release-0.3.0-6ad7e5.svg?style=flat +[release-image]: https://img.shields.io/badge/release-0.4.0-6ad7e5.svg?style=flat [releases]: https://github.com/snowplow/snowplow-cpp-tracker/releases [license-image]: https://img.shields.io/badge/license-Apache--2-blue.svg?style=flat diff --git a/src/constants.hpp b/src/constants.hpp index a9912cd..e8b6d41 100644 --- a/src/constants.hpp +++ b/src/constants.hpp @@ -21,7 +21,7 @@ using std::string; using std::set; namespace snowplow { -const string SNOWPLOW_TRACKER_VERSION_LABEL = "cpp-0.3.0"; +const string SNOWPLOW_TRACKER_VERSION_LABEL = "cpp-0.4.0"; // post requests const string SNOWPLOW_POST_PROTOCOL_VENDOR = "com.snowplowanalytics.snowplow"; From e2153c46baaf707b2e4ba12eaf53437a0605fed0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Tomlein?= Date: Thu, 12 May 2022 13:44:46 +0200 Subject: [PATCH 16/35] Publish API docs on Github Pages (close #73) PR #75 --- .github/workflows/docs.yml | 49 + .gitignore | 3 + Doxyfile | 2677 +++++++++++++++++ docs/01-setup.md | 21 + docs/02-initialisation.md | 131 + docs/03-adding-data.md | 149 + docs/04-tracking-events.md | 178 ++ docs/05-emitters.md | 118 + docs/06-client-sessions.md | 55 + docs/07-upgrading.md | 24 + docs/README.md | 9 + docs/img/.gitkeep | 0 .../styles/doxygen-awesome-darkmode-toggle.js | 157 + .../doxygen-awesome-fragment-copy-button.js | 85 + docs/styles/doxygen-awesome-paragraph-link.js | 51 + ...n-awesome-sidebar-only-darkmode-toggle.css | 40 + docs/styles/doxygen-awesome-sidebar-only.css | 113 + docs/styles/doxygen-awesome.css | 2135 +++++++++++++ .../doxygen-custom/custom-alternative.css | 54 + docs/styles/doxygen-custom/custom.css | 78 + docs/styles/doxygen-custom/header.html | 84 + .../toggle-alternative-theme.js | 12 + 22 files changed, 6223 insertions(+) 
create mode 100644 .github/workflows/docs.yml create mode 100644 Doxyfile create mode 100644 docs/01-setup.md create mode 100644 docs/02-initialisation.md create mode 100644 docs/03-adding-data.md create mode 100644 docs/04-tracking-events.md create mode 100644 docs/05-emitters.md create mode 100644 docs/06-client-sessions.md create mode 100644 docs/07-upgrading.md create mode 100644 docs/README.md create mode 100644 docs/img/.gitkeep create mode 100644 docs/styles/doxygen-awesome-darkmode-toggle.js create mode 100644 docs/styles/doxygen-awesome-fragment-copy-button.js create mode 100644 docs/styles/doxygen-awesome-paragraph-link.js create mode 100644 docs/styles/doxygen-awesome-sidebar-only-darkmode-toggle.css create mode 100644 docs/styles/doxygen-awesome-sidebar-only.css create mode 100644 docs/styles/doxygen-awesome.css create mode 100644 docs/styles/doxygen-custom/custom-alternative.css create mode 100644 docs/styles/doxygen-custom/custom.css create mode 100644 docs/styles/doxygen-custom/header.html create mode 100644 docs/styles/doxygen-custom/toggle-alternative-theme.js diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..c40b525 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,49 @@ +name: Documentation + +on: + push: + tags: + - '*.*.*' + +jobs: + test: + runs-on: macos-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Make + run: make + + - name: Run tests + run: make unit-tests + + docs: + needs: ["test"] + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: install Doxygen 1.9.3 + run: | + sudo apt-get update + sudo apt-get install -y graphviz libclang-cpp1-9 libclang1-9 + wget https://www.doxygen.nl/files/doxygen-1.9.3.linux.bin.tar.gz + tar -xvzf doxygen-1.9.3.linux.bin.tar.gz + ln -s doxygen-1.9.3/bin/doxygen doxygen + + - name: Set version + run: echo "PROJECT_NUMBER = `git describe --tags`" >> 
Doxyfile + + - name: Generate Documentation + run: ./doxygen Doxyfile + + - name: Deploy to GitHub Pages + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./docs/html/ diff --git a/.gitignore b/.gitignore index d8e9b12..75c43e7 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,6 @@ *.gcov *.gcno *.gcda + +# Documentation +docs/html diff --git a/Doxyfile b/Doxyfile new file mode 100644 index 0000000..4762169 --- /dev/null +++ b/Doxyfile @@ -0,0 +1,2677 @@ +# Doxyfile 1.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. 
+# The default value is: My Project. + +PROJECT_NAME = "Snowplow C++ Tracker" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "Snowplow event tracker for C++. Add analytics to your C++ applications, games and servers" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = docs + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. 
+# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. 
Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. 
+# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. 
+ +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". 
You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. 
+ +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. 
+# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 5 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. 
If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. 
If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. 
Since this is still an experimental feature the default is set to 1, +# which efficively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. 
+# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. 
+ +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. 
+# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. 
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. 
+# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. 
+ +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. 
If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = src \ + docs/README.md \ + docs/01-setup.md \ + docs/02-initialisation.md \ + docs/03-adding-data.md \ + docs/04-tracking-events.md \ + docs/05-emitters.md \ + docs/06-client-sessions.md \ + docs/07-upgrading.md + + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. 
Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, +# *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. 
This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. 
+
# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH = img docs/img
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+#   <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+ +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = README.md + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. 
+# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. 
in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: +# http://clang.llvm.org/) for more accurate parsing at the cost of reduced +# performance. This can be particularly helpful with template rich C++ code for +# which doxygen's built-in parser lacks the necessary type information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to +# YES then doxygen will add the directory of each input to the include path. +# The default value is: YES. + +CLANG_ADD_INC_PATHS = YES + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + +# If clang assisted parsing is enabled you can provide the clang parser with the +# path to the directory containing a file called compile_commands.json. 
This +# file is the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the +# options used when the source files were built. This is equivalent to +# specifying the -p option to a clang tool, such as clang-check. These options +# will then be passed to the parser. Any options specified with CLANG_OPTIONS +# will be added as well. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. + +CLANG_DATABASE_PATH = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = docs/styles/doxygen-custom/header.html + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = docs/styles/doxygen-awesome.css \ + docs/styles/doxygen-custom/custom.css \ + docs/styles/doxygen-awesome-sidebar-only.css \ + docs/styles/doxygen-awesome-sidebar-only-darkmode-toggle.css \ + docs/styles/doxygen-custom/custom-alternative.css \ + + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. 
In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = docs/styles/doxygen-awesome-darkmode-toggle.js \ + docs/styles/doxygen-awesome-fragment-copy-button.js \ + docs/styles/doxygen-awesome-paragraph-link.js \ + docs/styles/doxygen-custom/toggle-alternative-theme.js + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 209 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 255 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE_GAMMA = 113 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: +# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). 
+# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. 
+ +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. 
To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. 
Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 335 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. 
See the MathJax site (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment. +# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. 
Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/