This commit is contained in:
Mars 2024-06-02 06:03:21 -04:00
parent 6e5045f1f4
commit 693fa17d10
Signed by: pupbrained
GPG key ID: 0FF5B8826803F895
266 changed files with 60543 additions and 1000 deletions

View file

@ -6,3 +6,9 @@ NamespaceIndentation: All
SpaceBeforeCpp11BracedList: true
SpacesBeforeTrailingComments: 1
AlignConsecutiveAssignments: true
IncludeBlocks: Regroup
IncludeCategories:
- Regex: '".*"'
Priority: 1
- Regex: '<.*>'
Priority: -1
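The net effect of the new IncludeCategories block, for orientation: with IncludeBlocks: Regroup, the <.*> category (priority -1) sorts ahead of the ".*" category (priority 1), which is why the include lists in config.cpp and config.h are reshuffled later in this commit. A minimal illustration:

// before regrouping:
#include "config.h"
#include <toml++/toml.h>
// after regrouping (angle-bracket block first, quoted project headers last):
#include <toml++/toml.h>
#include "config.h"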

View file

@ -46,7 +46,7 @@
];
};
stdenv = pkgs.llvmPackages_18.stdenv;
stdenv = pkgs.stdenvAdapters.useMoldLinker pkgs.llvmPackages_18.stdenv;
in
with pkgs; {
packages = rec {

View file

@ -1,18 +1,20 @@
project(
'draconis++', 'cpp',
version: '0.1.0',
default_options: [
'cpp_std=c++26',
'default_library=static',
'warning_level=everything'
'warning_level=everything',
'buildtype=debugoptimized'
]
)
clangtidy = find_program('clang-tidy', required: false)
cc = meson.get_compiler('cpp')
cpp = meson.get_compiler('cpp')
add_project_arguments(
cc.get_supported_arguments([
cpp.get_supported_arguments([
'-Wno-c++20-compat',
'-Wno-c++98-compat',
'-Wno-c++98-compat-pedantic',
@ -29,7 +31,8 @@ source_file_names = [
'src/main.cpp',
'src/os/os.h',
'src/config/config.cpp',
'src/config/config.h'
'src/config/config.h',
'src/config/weather.cpp'
]
if host_machine.system() == 'linux'
@ -46,16 +49,16 @@ foreach file : source_file_names
sources += files(file)
endforeach
quill = subproject('quill')
deps = []
deps += cc.find_library('cpr')
deps += cc.find_library('curl')
deps += cc.find_library('tomlplusplus')
deps += cpp.find_library('cpr')
deps += cpp.find_library('curl')
deps += cpp.find_library('tomlplusplus')
deps += dependency('boost', static: true)
deps += dependency('fmt')
fmtlog = subproject('fmtlog')
deps += fmtlog.get_variable('fmtlog_dep')
deps += quill.get_variable('quill_dep')
if host_machine.system() == 'linux'
deps += dependency('playerctl')

View file

@ -1,9 +1,10 @@
#include "config.h"
#include <fmt/core.h>
#include <toml++/toml.h>
#include <unistd.h>
#include <rfl.hpp>
#include <rfl/toml.hpp>
#include <toml++/toml.h>
#include <unistd.h>
#include "config.h"
#define DEFINE_GETTER(class_name, type, name) \
type class_name::get##name() const { \
@ -79,126 +80,3 @@ ConfigImpl ConfigImpl::from_class(const Config& config) noexcept {
Config ConfigImpl::to_class() const {
return {general, now_playing, weather};
}
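// Illustrative note (not part of this diff): the ConfigImpl/Config pair follows
// reflect-cpp's custom-class pattern -- serialize through an Impl struct with
// from_class()/to_class() plus the Parser specializations declared in config.h.
// A hedged sketch of how the config would then be read, assuming the rfl::toml
// API pulled in via <rfl/toml.hpp>:
//
//   const rfl::Result<Config> cfg = rfl::toml::load<Config>("config.toml");
//   if (!cfg) { /* report the parse error and fall back to defaults */ }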
boost::json::object Weather::getWeatherInfo() const {
using namespace std;
using namespace cpr;
using namespace boost;
using namespace std::chrono;
const Location loc = this->m_Location;
const string apiKey = this->m_ApiKey;
const string units = this->m_Units;
// Define cache file and cache duration
const string cacheFile = "/tmp/weather_cache.json";
constexpr minutes cacheDuration = minutes(10);
logi("Cache file: {}", cacheFile);
logi("Cache duration: {} minutes",
duration_cast<minutes>(cacheDuration).count());
// Function to read cache from file
auto readCacheFromFile =
[&]() -> optional<pair<json::object, system_clock::time_point>> {
ifstream ifs(cacheFile);
if (!ifs.is_open()) {
logi("Cache file not found.");
return nullopt;
}
logi("Reading from cache file...");
json::object cachedData;
system_clock::time_point timestamp;
try {
json::value val;
ifs >> val;
cachedData = val.as_object();
string tsStr = cachedData["timestamp"].as_string().c_str();
timestamp = system_clock::time_point(milliseconds(stoll(tsStr)));
cachedData.erase("timestamp");
} catch (...) {
loge("Failed to read from cache file.");
return nullopt;
}
logi("Successfully read from cache file.");
return make_pair(cachedData, timestamp);
};
// Function to write cache to file
auto writeCacheToFile = [&](const json::object& data) {
fmt::println("Writing to cache file...");
ofstream ofs(cacheFile);
if (!ofs.is_open()) {
loge("Failed to open cache file for writing.");
return;
}
json::object dataToWrite = data;
dataToWrite["timestamp"] = to_string(
duration_cast<milliseconds>(system_clock::now().time_since_epoch())
.count());
ofs << json::serialize(dataToWrite);
logi("Successfully wrote to cache file.");
};
// Check if cache is valid
if (auto cachedData = readCacheFromFile()) {
auto [data, timestamp] = *cachedData;
if (system_clock::now() - timestamp < cacheDuration) {
logi("Cache is valid. Returning cached data.");
return data;
}
logi("Cache is expired.");
} else {
logi("No valid cache found.");
}
json::object result;
if (holds_alternative<string>(loc)) {
const string city = get<string>(loc);
const char* location = curl_easy_escape(nullptr, city.c_str(),
static_cast<int>(city.length()));
logi("City: {}", location);
logi("Making API request for city: {}", city);
const Response res =
Get(Url {fmt::format("https://api.openweathermap.org/data/2.5/"
"weather?q={}&appid={}&units={}",
location, apiKey, units)});
logi("Received response from API.");
json::value json = json::parse(res.text);
result = json.as_object();
} else {
const auto [lat, lon] = get<Coords>(loc);
logi("Coordinates: lat = {:.3f}, lon = {:.3f}", lat, lon);
logi("Making API request for coordinates.");
const Response res =
Get(Url {fmt::format("https://api.openweathermap.org/data/2.5/"
"weather?lat={:.3f}&lon={:.3f}&appid={}&units={}",
lat, lon, apiKey, units)});
logi("Received response from API.");
json::value json = json::parse(res.text);
result = json.as_object();
}
// Update the cache with the new data
writeCacheToFile(result);
logi("Returning new data.");
return result;
}

View file

@ -1,26 +1,26 @@
#pragma once
#include <boost/json.hpp>
#include <cpr/cpr.h>
#include <fmt/core.h>
#include <fmtlog.h>
#include <toml++/toml.h>
#include <unistd.h>
#include <boost/json.hpp>
#include <rfl.hpp>
#include <string>
#include <toml++/impl/parser.hpp>
#include <toml++/toml.h>
#include <unistd.h>
#include <variant>
using std::string;
struct Coords {
double lat;
double lon;
};
using Location = std::variant<string, Coords>;
class Weather {
public:
struct Coords {
double lat;
double lon;
};
using Location = std::variant<string, Coords>;
private:
Location m_Location;
string m_ApiKey;
@ -36,7 +36,7 @@ class Weather {
};
struct WeatherImpl {
Location location;
Weather::Location location;
string api_key;
string units;
@ -68,7 +68,7 @@ class NowPlaying {
bool m_Enabled;
public:
NowPlaying(bool enable);
NowPlaying(bool enabled);
[[nodiscard]] bool getEnabled() const;
};
@ -107,6 +107,7 @@ struct ConfigImpl {
[[nodiscard]] Config to_class() const;
};
// Parsers for Config classes
namespace rfl::parsing {
template <class ReaderType, class WriterType, class ProcessorsType>
struct Parser<ReaderType, WriterType, Weather, ProcessorsType>

137
src/config/weather.cpp Normal file
View file

@ -0,0 +1,137 @@
#include <rfl.hpp>
#include <rfl/toml.hpp>
#include <toml++/toml.h>
#include <unistd.h>
#include "config.h"
using namespace std;
using namespace chrono;
using namespace boost;
// Function to read cache from file
optional<pair<json::object, system_clock::time_point>> ReadCacheFromFile() {
const string cacheFile = "/tmp/weather_cache.json";
ifstream ifs(cacheFile);
if (!ifs.is_open()) {
fmt::println("Cache file not found.");
return nullopt;
}
fmt::println("Reading from cache file...");
json::object cachedData;
system_clock::time_point timestamp;
try {
json::value val;
ifs >> val;
cachedData = val.as_object();
string tsStr = cachedData["timestamp"].as_string().c_str();
timestamp = system_clock::time_point(milliseconds(stoll(tsStr)));
cachedData.erase("timestamp");
} catch (...) {
fmt::println(stderr, "Failed to read from cache file.");
return nullopt;
}
fmt::println("Successfully read from cache file.");
return make_pair(cachedData, timestamp);
}
// Function to write cache to file
void WriteCacheToFile(const json::object& data) {
const string cacheFile = "/tmp/weather_cache.json";
fmt::println("Writing to cache file...");
ofstream ofs(cacheFile);
if (!ofs.is_open()) {
fmt::println(stderr, "Failed to open cache file for writing.");
return;
}
json::object dataToWrite = data;
dataToWrite["timestamp"] = to_string(
duration_cast<milliseconds>(system_clock::now().time_since_epoch())
.count());
ofs << json::serialize(dataToWrite);
fmt::println("Successfully wrote to cache file.");
}
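// Illustration of the cache layout (hypothetical values): the file holds the
// last OpenWeatherMap response object plus an extra "timestamp" key storing the
// write time as epoch milliseconds in string form, e.g. roughly
//   { "name": "Berlin", "main": { "temp": 21.4 }, "timestamp": "1717315200000" }
// ReadCacheFromFile() strips "timestamp" back out before handing the data back.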
// Function to make API request
json::object MakeApiRequest(const string& url) {
using namespace cpr;
fmt::println("Making API request...");
const Response res = Get(Url {url});
fmt::println("Received response from API.");
json::value json = json::parse(res.text);
return json.as_object();
}
// Core function to get weather information
json::object Weather::getWeatherInfo() const {
using namespace cpr;
const Location loc = m_Location;
const string apiKey = m_ApiKey;
const string units = m_Units;
// Check if cache is valid
if (auto cachedData = ReadCacheFromFile()) {
auto [data, timestamp] = *cachedData;
if (system_clock::now() - timestamp <
minutes(10)) { // Assuming cache duration is always 10 minutes
fmt::println("Cache is valid. Returning cached data.");
return data;
}
fmt::println("Cache is expired.");
} else {
fmt::println("No valid cache found.");
}
json::object result;
if (holds_alternative<string>(loc)) {
const string city = get<string>(loc);
const char* location = curl_easy_escape(nullptr, city.c_str(),
static_cast<int>(city.length()));
fmt::println("City: {}", location);
const string apiUrl = format(
"https://api.openweathermap.org/data/2.5/"
"weather?q={}&appid={}&units={}",
location, apiKey, units);
result = MakeApiRequest(apiUrl);
} else {
const auto [lat, lon] = get<Coords>(loc);
fmt::println("Coordinates: lat = {:.3f}, lon = {:.3f}", lat, lon);
const string apiUrl = format(
"https://api.openweathermap.org/data/2.5/"
"weather?lat={:.3f}&lon={:.3f}&appid={}&units={}",
lat, lon, apiKey, units);
result = MakeApiRequest(apiUrl);
}
// Update the cache with the new data
WriteCacheToFile(result);
fmt::println("Returning new data.");
return result;
}
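For orientation, a minimal sketch of how this function might be consumed; the caller and the `weather` variable are assumptions, not part of this commit:

// Hypothetical caller, assuming `weather` is the Weather instance built from the parsed config:
boost::json::object info = weather.getWeatherInfo();
// Dump the whole (possibly cached) response; picking out e.g. info["main"] would
// depend on the OpenWeatherMap response schema rather than on anything in this file.
fmt::println("{}", boost::json::serialize(info));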

View file

@ -1,18 +1,17 @@
#include <boost/json/src.hpp>
#include <cpr/cpr.h>
#include <ctime>
#include <curl/curl.h>
#include <fmt/chrono.h>
#include <fmt/core.h>
#include <fmt/format.h>
#include <boost/json/src.hpp>
#include <ctime>
#include <rfl.hpp>
#include <rfl/toml.hpp>
#include <rfl/toml/load.hpp>
#include <rfl/toml/read.hpp>
#include <toml++/toml.hpp>
#include <variant>
#include "config/config.h"
#include "fmtlog.h"
#include "os/os.h"
using std::string;
@ -69,9 +68,8 @@ int main() {
auto trimStart = [](std::string& str) {
auto start = str.begin();
while (start != str.end() && std::isspace(*start)) {
start++;
}
while (start != str.end() && std::isspace(*start))
++start;
str.erase(str.begin(), start);
};

View file

@ -1,2 +0,0 @@
[provide]
fmtlog = fmtlog_dep

View file

@ -1,826 +0,0 @@
/*
MIT License
Copyright (c) 2021 Meng Rao <raomeng1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#pragma once
//#define FMT_HEADER_ONLY
#include "fmt/format.h"
#include <type_traits>
#include <vector>
#include <chrono>
#include <atomic>
#include <thread>
#include <memory>
#ifdef _MSC_VER
#include <intrin.h>
#endif
#ifdef _WIN32
#define FAST_THREAD_LOCAL thread_local
#else
#define FAST_THREAD_LOCAL __thread
#endif
// define FMTLOG_BLOCK=1 if the log statement should be blocked when the queue is full, instead of discarding the msg
#ifndef FMTLOG_BLOCK
#define FMTLOG_BLOCK 0
#endif
#define FMTLOG_LEVEL_DBG 0
#define FMTLOG_LEVEL_INF 1
#define FMTLOG_LEVEL_WRN 2
#define FMTLOG_LEVEL_ERR 3
#define FMTLOG_LEVEL_OFF 4
// define FMTLOG_ACTIVE_LEVEL to turn off low log levels at compile time
#ifndef FMTLOG_ACTIVE_LEVEL
#define FMTLOG_ACTIVE_LEVEL FMTLOG_LEVEL_DBG
#endif
#ifndef FMTLOG_QUEUE_SIZE
#define FMTLOG_QUEUE_SIZE (1 << 20)
#endif
namespace fmtlogdetail {
template<typename Arg>
struct UnrefPtr : std::false_type
{ using type = Arg; };
template<>
struct UnrefPtr<char*> : std::false_type
{ using type = char*; };
template<>
struct UnrefPtr<void*> : std::false_type
{ using type = void*; };
template<typename Arg>
struct UnrefPtr<std::shared_ptr<Arg>> : std::true_type
{ using type = Arg; };
template<typename Arg, typename D>
struct UnrefPtr<std::unique_ptr<Arg, D>> : std::true_type
{ using type = Arg; };
template<typename Arg>
struct UnrefPtr<Arg*> : std::true_type
{ using type = Arg; };
}; // namespace fmtlogdetail
template<int __ = 0>
class fmtlogT
{
public:
enum LogLevel : uint8_t
{
DBG = 0,
INF,
WRN,
ERR,
OFF
};
// Preallocate thread queue for current thread
static void preallocate() noexcept;
// Set the file for logging
static void setLogFile(const char* filename, bool truncate = false);
// Set an existing FILE* for logging, if manageFp is false fmtlog will not buffer log internally
// and will not close the FILE*
static void setLogFile(FILE* fp, bool manageFp = false);
// Collect log msgs from all threads and write to log file
// If forceFlush = true, internal file buffer is flushed
// Users need to call poll() repeatedly if startPollingThread is not used
static void poll(bool forceFlush = false);
// Set flush delay in nanoseconds
// If there's a msg older than ns in the buffer, a flush will be triggered
static void setFlushDelay(int64_t ns) noexcept;
// If current msg has level >= flushLogLevel, flush will be triggered
static void flushOn(LogLevel flushLogLevel) noexcept;
// If file buffer has more than specified bytes, flush will be triggered
static void setFlushBufSize(uint32_t bytes) noexcept;
// callback signature user can register
// ns: nanosecond timestamp
// level: logLevel
// location: full file path with line num, e.g: /home/raomeng/fmtlog/fmtlog.h:45
// basePos: file base index in the location
// threadName: thread id or the name user set with setThreadName
// msg: full log msg with header
// bodyPos: log body index in the msg
// logFilePos: log file position of this msg
typedef void (*LogCBFn)(int64_t ns, LogLevel level, fmt::string_view location, size_t basePos,
fmt::string_view threadName, fmt::string_view msg, size_t bodyPos,
size_t logFilePos);
// Set a callback function for all log msgs with a minimum log level
static void setLogCB(LogCBFn cb, LogLevel minCBLogLevel) noexcept;
typedef void (*LogQFullCBFn)(void* userData);
static void setLogQFullCB(LogQFullCBFn cb, void* userData) noexcept;
// Close the log file and subsequent msgs will not be written into the file,
// but callback function can still be used
static void closeLogFile() noexcept;
// Set log header pattern with fmt named arguments
static void setHeaderPattern(const char* pattern);
// Set a name for the current thread; it'll be shown in the {t} part of the header pattern
static void setThreadName(const char* name) noexcept;
// Set current log level, lower level log msgs will be discarded
static inline void setLogLevel(LogLevel logLevel) noexcept;
// Get current log level
static inline LogLevel getLogLevel() noexcept;
// return true if passed log level is not lower than current log level
static inline bool checkLogLevel(LogLevel logLevel) noexcept;
// Run a polling thread in the background with a polling interval in ns
// Note that the user must not call poll() manually when the thread is running
static void startPollingThread(int64_t pollInterval = 1000000000) noexcept;
// Stop the polling thread
static void stopPollingThread() noexcept;
// https://github.com/MengRao/SPSC_Queue
class SPSCVarQueueOPT
{
public:
struct MsgHeader
{
inline void push(uint32_t sz) { *(volatile uint32_t*)&size = sz + sizeof(MsgHeader); }
uint32_t size;
uint32_t logId;
};
static constexpr uint32_t BLK_CNT = FMTLOG_QUEUE_SIZE / sizeof(MsgHeader);
MsgHeader* allocMsg(uint32_t size) noexcept;
MsgHeader* alloc(uint32_t size) {
size += sizeof(MsgHeader);
uint32_t blk_sz = (size + sizeof(MsgHeader) - 1) / sizeof(MsgHeader);
if (blk_sz >= free_write_cnt) {
uint32_t read_idx_cache = *(volatile uint32_t*)&read_idx;
if (read_idx_cache <= write_idx) {
free_write_cnt = BLK_CNT - write_idx;
if (blk_sz >= free_write_cnt && read_idx_cache != 0) { // wrap around
blk[0].size = 0;
blk[write_idx].size = 1;
write_idx = 0;
free_write_cnt = read_idx_cache;
}
}
else {
free_write_cnt = read_idx_cache - write_idx;
}
if (free_write_cnt <= blk_sz) {
return nullptr;
}
}
MsgHeader* ret = &blk[write_idx];
write_idx += blk_sz;
free_write_cnt -= blk_sz;
blk[write_idx].size = 0;
return ret;
}
inline const MsgHeader* front() {
uint32_t size = blk[read_idx].size;
if (size == 1) { // wrap around
read_idx = 0;
size = blk[0].size;
}
if (size == 0) return nullptr;
return &blk[read_idx];
}
inline void pop() {
uint32_t blk_sz = (blk[read_idx].size + sizeof(MsgHeader) - 1) / sizeof(MsgHeader);
*(volatile uint32_t*)&read_idx = read_idx + blk_sz;
}
private:
alignas(64) MsgHeader blk[BLK_CNT] = {};
uint32_t write_idx = 0;
uint32_t free_write_cnt = BLK_CNT;
alignas(128) uint32_t read_idx = 0;
};
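// Descriptive note on the protocol above: alloc() returns nullptr when the
// contiguous free region is too small, and the caller in log()/logOnce() below
// either spins (FMTLOG_BLOCK=1) or drops the message. A stored block size of 1
// at read_idx marks a wrap-around back to block 0, a size of 0 marks "not yet
// published", and MsgHeader::push() publishes a message by writing its final
// size last.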
struct ThreadBuffer
{
SPSCVarQueueOPT varq;
bool shouldDeallocate = false;
char name[32];
size_t nameSize;
};
// https://github.com/MengRao/tscns
class TSCNS
{
public:
static const int64_t NsPerSec = 1000000000;
void init(int64_t init_calibrate_ns = 20000000, int64_t calibrate_interval_ns = 3 * NsPerSec) {
calibate_interval_ns_ = calibrate_interval_ns;
int64_t base_tsc, base_ns;
syncTime(base_tsc, base_ns);
int64_t expire_ns = base_ns + init_calibrate_ns;
while (rdsysns() < expire_ns) std::this_thread::yield();
int64_t delayed_tsc, delayed_ns;
syncTime(delayed_tsc, delayed_ns);
double init_ns_per_tsc = (double)(delayed_ns - base_ns) / (delayed_tsc - base_tsc);
saveParam(base_tsc, base_ns, base_ns, init_ns_per_tsc);
}
void calibrate() {
if (rdtsc() < next_calibrate_tsc_) return;
int64_t tsc, ns;
syncTime(tsc, ns);
int64_t calulated_ns = tsc2ns(tsc);
int64_t ns_err = calulated_ns - ns;
int64_t expected_err_at_next_calibration =
ns_err + (ns_err - base_ns_err_) * calibate_interval_ns_ / (ns - base_ns_ + base_ns_err_);
double new_ns_per_tsc =
ns_per_tsc_ * (1.0 - (double)expected_err_at_next_calibration / calibate_interval_ns_);
saveParam(tsc, calulated_ns, ns, new_ns_per_tsc);
}
static inline int64_t rdtsc() {
#ifdef _MSC_VER
return __rdtsc();
#elif defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
return __builtin_ia32_rdtsc();
#else
return rdsysns();
#endif
}
inline int64_t tsc2ns(int64_t tsc) const {
while (true) {
uint32_t before_seq = param_seq_.load(std::memory_order_acquire) & ~1;
std::atomic_signal_fence(std::memory_order_acq_rel);
int64_t ns = base_ns_ + (int64_t)((tsc - base_tsc_) * ns_per_tsc_);
std::atomic_signal_fence(std::memory_order_acq_rel);
uint32_t after_seq = param_seq_.load(std::memory_order_acquire);
if (before_seq == after_seq) return ns;
}
}
inline int64_t rdns() const { return tsc2ns(rdtsc()); }
static inline int64_t rdsysns() {
using namespace std::chrono;
return duration_cast<nanoseconds>(system_clock::now().time_since_epoch()).count();
}
double getTscGhz() const { return 1.0 / ns_per_tsc_; }
// The Linux kernel syncs time by finding the first trial with tsc diff < 50000.
// We try several times and return the one with the minimum tsc diff.
// Note that MSVC has a 100ns resolution clock, so we need to combine those ns with the same
// value, and drop the first and the last value as they may not scan a full 100ns range
static void syncTime(int64_t& tsc_out, int64_t& ns_out) {
#ifdef _MSC_VER
const int N = 15;
#else
const int N = 3;
#endif
int64_t tsc[N + 1];
int64_t ns[N + 1];
tsc[0] = rdtsc();
for (int i = 1; i <= N; i++) {
ns[i] = rdsysns();
tsc[i] = rdtsc();
}
#ifdef _MSC_VER
int j = 1;
for (int i = 2; i <= N; i++) {
if (ns[i] == ns[i - 1]) continue;
tsc[j - 1] = tsc[i - 1];
ns[j++] = ns[i];
}
j--;
#else
int j = N + 1;
#endif
int best = 1;
for (int i = 2; i < j; i++) {
if (tsc[i] - tsc[i - 1] < tsc[best] - tsc[best - 1]) best = i;
}
tsc_out = (tsc[best] + tsc[best - 1]) >> 1;
ns_out = ns[best];
}
void saveParam(int64_t base_tsc, int64_t base_ns, int64_t sys_ns, double new_ns_per_tsc) {
base_ns_err_ = base_ns - sys_ns;
next_calibrate_tsc_ = base_tsc + (int64_t)((calibate_interval_ns_ - 1000) / new_ns_per_tsc);
uint32_t seq = param_seq_.load(std::memory_order_relaxed);
param_seq_.store(++seq, std::memory_order_release);
std::atomic_signal_fence(std::memory_order_acq_rel);
base_tsc_ = base_tsc;
base_ns_ = base_ns;
ns_per_tsc_ = new_ns_per_tsc;
std::atomic_signal_fence(std::memory_order_acq_rel);
param_seq_.store(++seq, std::memory_order_release);
}
alignas(64) std::atomic<uint32_t> param_seq_ = 0;
double ns_per_tsc_;
int64_t base_tsc_;
int64_t base_ns_;
int64_t calibate_interval_ns_;
int64_t base_ns_err_;
int64_t next_calibrate_tsc_;
};
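// Descriptive note: param_seq_ implements a small seqlock. saveParam() bumps the
// sequence to an odd value, rewrites base_tsc_/base_ns_/ns_per_tsc_, then bumps
// it back to an even value; tsc2ns() masks off the low bit before reading and
// retries if the sequence changed, so readers never observe a half-written
// calibration.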
void init() {
tscns.init();
currentLogLevel = INF;
}
using Context = fmt::format_context;
using MemoryBuffer = fmt::basic_memory_buffer<char, 10000>;
typedef const char* (*FormatToFn)(fmt::string_view format, const char* data, MemoryBuffer& out,
int& argIdx, std::vector<fmt::basic_format_arg<Context>>& args);
static void registerLogInfo(uint32_t& logId, FormatToFn fn, const char* location, LogLevel level,
fmt::string_view fmtString) noexcept;
static void vformat_to(MemoryBuffer& out, fmt::string_view fmt, fmt::format_args args);
static size_t formatted_size(fmt::string_view fmt, fmt::format_args args);
static void vformat_to(char* out, fmt::string_view fmt, fmt::format_args args);
static typename SPSCVarQueueOPT::MsgHeader* allocMsg(uint32_t size, bool logQFullCB) noexcept;
TSCNS tscns;
volatile LogLevel currentLogLevel;
static FAST_THREAD_LOCAL ThreadBuffer* threadBuffer;
template<typename Arg>
static inline constexpr bool isNamedArg() {
return fmt::detail::is_named_arg<fmt::remove_cvref_t<Arg>>::value;
}
template<typename Arg>
struct unNamedType
{ using type = Arg; };
template<typename Arg>
struct unNamedType<fmt::detail::named_arg<char, Arg>>
{ using type = Arg; };
#if FMT_USE_NONTYPE_TEMPLATE_ARGS
template<typename Arg, size_t N, fmt::detail_exported::fixed_string<char, N> Str>
struct unNamedType<fmt::detail::statically_named_arg<Arg, char, N, Str>>
{ using type = Arg; };
#endif
template<typename Arg>
static inline constexpr bool isCstring() {
return fmt::detail::mapped_type_constant<Arg, Context>::value ==
fmt::detail::type::cstring_type;
}
template<typename Arg>
static inline constexpr bool isString() {
return fmt::detail::mapped_type_constant<Arg, Context>::value == fmt::detail::type::string_type;
}
template<typename Arg>
static inline constexpr bool needCallDtor() {
using ArgType = fmt::remove_cvref_t<Arg>;
if constexpr (isNamedArg<Arg>()) {
return needCallDtor<typename unNamedType<ArgType>::type>();
}
if constexpr (isString<Arg>()) return false;
return !std::is_trivially_destructible<ArgType>::value;
}
template<size_t CstringIdx>
static inline constexpr size_t getArgSizes(size_t* cstringSize) {
return 0;
}
template<size_t CstringIdx, typename Arg, typename... Args>
static inline constexpr size_t getArgSizes(size_t* cstringSize, const Arg& arg,
const Args&... args) {
if constexpr (isNamedArg<Arg>()) {
return getArgSizes<CstringIdx>(cstringSize, arg.value, args...);
}
else if constexpr (isCstring<Arg>()) {
size_t len = strlen(arg) + 1;
cstringSize[CstringIdx] = len;
return len + getArgSizes<CstringIdx + 1>(cstringSize, args...);
}
else if constexpr (isString<Arg>()) {
size_t len = arg.size() + 1;
return len + getArgSizes<CstringIdx>(cstringSize, args...);
}
else {
return sizeof(Arg) + getArgSizes<CstringIdx>(cstringSize, args...);
}
}
template<size_t CstringIdx>
static inline constexpr char* encodeArgs(size_t* cstringSize, char* out) {
return out;
}
template<size_t CstringIdx, typename Arg, typename... Args>
static inline constexpr char* encodeArgs(size_t* cstringSize, char* out, Arg&& arg,
Args&&... args) {
if constexpr (isNamedArg<Arg>()) {
return encodeArgs<CstringIdx>(cstringSize, out, arg.value, std::forward<Args>(args)...);
}
else if constexpr (isCstring<Arg>()) {
memcpy(out, arg, cstringSize[CstringIdx]);
return encodeArgs<CstringIdx + 1>(cstringSize, out + cstringSize[CstringIdx],
std::forward<Args>(args)...);
}
else if constexpr (isString<Arg>()) {
size_t len = arg.size();
memcpy(out, arg.data(), len);
out[len] = 0;
return encodeArgs<CstringIdx>(cstringSize, out + len + 1, std::forward<Args>(args)...);
}
else {
// If Arg has alignment >= 16, gcc could emit aligned move instructions(e.g. movdqa) for
// placement new even if the *out* is misaligned, which would cause segfault. So we use memcpy
// when possible
if constexpr (std::is_trivially_copyable_v<fmt::remove_cvref_t<Arg>>) {
memcpy(out, &arg, sizeof(Arg));
}
else {
new (out) fmt::remove_cvref_t<Arg>(std::forward<Arg>(arg));
}
return encodeArgs<CstringIdx>(cstringSize, out + sizeof(Arg), std::forward<Args>(args)...);
}
}
template<size_t Idx, size_t NamedIdx>
static inline constexpr void storeNamedArgs(fmt::detail::named_arg_info<char>* named_args_store) {
}
template<size_t Idx, size_t NamedIdx, typename Arg, typename... Args>
static inline constexpr void storeNamedArgs(fmt::detail::named_arg_info<char>* named_args_store,
const Arg& arg, const Args&... args) {
if constexpr (isNamedArg<Arg>()) {
named_args_store[NamedIdx] = {arg.name, Idx};
storeNamedArgs<Idx + 1, NamedIdx + 1>(named_args_store, args...);
}
else {
storeNamedArgs<Idx + 1, NamedIdx>(named_args_store, args...);
}
}
template<bool ValueOnly, size_t Idx, size_t DestructIdx>
static inline const char* decodeArgs(const char* in, fmt::basic_format_arg<Context>* args,
const char** destruct_args) {
return in;
}
template<bool ValueOnly, size_t Idx, size_t DestructIdx, typename Arg, typename... Args>
static inline const char* decodeArgs(const char* in, fmt::basic_format_arg<Context>* args,
const char** destruct_args) {
using namespace fmtlogdetail;
using ArgType = fmt::remove_cvref_t<Arg>;
if constexpr (isNamedArg<ArgType>()) {
return decodeArgs<ValueOnly, Idx, DestructIdx, typename unNamedType<ArgType>::type, Args...>(
in, args, destruct_args);
}
else if constexpr (isCstring<Arg>() || isString<Arg>()) {
size_t size = strlen(in);
fmt::string_view v(in, size);
if constexpr (ValueOnly) {
fmt::detail::value<Context>& value_ = *(fmt::detail::value<Context>*)(args + Idx);
value_ = fmt::detail::arg_mapper<Context>().map(v);
}
else {
args[Idx] = fmt::detail::make_arg<Context>(v);
}
return decodeArgs<ValueOnly, Idx + 1, DestructIdx, Args...>(in + size + 1, args,
destruct_args);
}
else {
if constexpr (ValueOnly) {
fmt::detail::value<Context>& value_ = *(fmt::detail::value<Context>*)(args + Idx);
if constexpr (UnrefPtr<ArgType>::value) {
value_ = fmt::detail::arg_mapper<Context>().map(**(ArgType*)in);
}
else {
value_ = fmt::detail::arg_mapper<Context>().map(*(ArgType*)in);
}
}
else {
if constexpr (UnrefPtr<ArgType>::value) {
args[Idx] = fmt::detail::make_arg<Context>(**(ArgType*)in);
}
else {
args[Idx] = fmt::detail::make_arg<Context>(*(ArgType*)in);
}
}
if constexpr (needCallDtor<Arg>()) {
destruct_args[DestructIdx] = in;
return decodeArgs<ValueOnly, Idx + 1, DestructIdx + 1, Args...>(in + sizeof(ArgType), args,
destruct_args);
}
else {
return decodeArgs<ValueOnly, Idx + 1, DestructIdx, Args...>(in + sizeof(ArgType), args,
destruct_args);
}
}
}
template<size_t DestructIdx>
static inline void destructArgs(const char** destruct_args) {}
template<size_t DestructIdx, typename Arg, typename... Args>
static inline void destructArgs(const char** destruct_args) {
using ArgType = fmt::remove_cvref_t<Arg>;
if constexpr (isNamedArg<ArgType>()) {
destructArgs<DestructIdx, typename unNamedType<ArgType>::type, Args...>(destruct_args);
}
else if constexpr (needCallDtor<Arg>()) {
((ArgType*)destruct_args[DestructIdx])->~ArgType();
destructArgs<DestructIdx + 1, Args...>(destruct_args);
}
else {
destructArgs<DestructIdx, Args...>(destruct_args);
}
}
template<typename... Args>
static const char* formatTo(fmt::string_view format, const char* data, MemoryBuffer& out,
int& argIdx, std::vector<fmt::basic_format_arg<Context>>& args) {
constexpr size_t num_args = sizeof...(Args);
constexpr size_t num_dtors = fmt::detail::count<needCallDtor<Args>()...>();
const char* dtor_args[std::max(num_dtors, (size_t)1)];
const char* ret;
if (argIdx < 0) {
argIdx = (int)args.size();
args.resize(argIdx + num_args);
ret = decodeArgs<false, 0, 0, Args...>(data, args.data() + argIdx, dtor_args);
}
else {
ret = decodeArgs<true, 0, 0, Args...>(data, args.data() + argIdx, dtor_args);
}
vformat_to(out, format, fmt::basic_format_args(args.data() + argIdx, num_args));
destructArgs<0, Args...>(dtor_args);
return ret;
}
template<bool Reorder, typename... Args>
static fmt::string_view unNameFormat(fmt::string_view in, uint32_t* reorderIdx,
const Args&... args) {
constexpr size_t num_named_args = fmt::detail::count<isNamedArg<Args>()...>();
if constexpr (num_named_args == 0) {
return in;
}
const char* begin = in.data();
const char* p = begin;
std::unique_ptr<char[]> unnamed_str(new char[in.size() + 1 + num_named_args * 5]);
fmt::detail::named_arg_info<char> named_args[std::max(num_named_args, (size_t)1)];
storeNamedArgs<0, 0>(named_args, args...);
char* out = (char*)unnamed_str.get();
uint8_t arg_idx = 0;
while (true) {
auto c = *p++;
if (!c) {
size_t copy_size = p - begin - 1;
memcpy(out, begin, copy_size);
out += copy_size;
break;
}
if (c != '{') continue;
size_t copy_size = p - begin;
memcpy(out, begin, copy_size);
out += copy_size;
begin = p;
c = *p++;
if (!c) fmt::detail::throw_format_error("invalid format string");
if (fmt::detail::is_name_start(c)) {
while ((fmt::detail::is_name_start(c = *p) || ('0' <= c && c <= '9'))) {
++p;
}
fmt::string_view name(begin, p - begin);
int id = -1;
for (size_t i = 0; i < num_named_args; ++i) {
if (named_args[i].name == name) {
id = named_args[i].id;
break;
}
}
if (id < 0) fmt::detail::throw_format_error("invalid format string");
if constexpr (Reorder) {
reorderIdx[id] = arg_idx++;
}
else {
out = fmt::format_to(out, "{}", id);
}
}
else {
*out++ = c;
}
begin = p;
}
const char* ptr = unnamed_str.release();
return fmt::string_view(ptr, out - ptr);
}
public:
template<typename... Args>
inline void log(
uint32_t& logId, int64_t tsc, const char* location, LogLevel level,
fmt::format_string<typename fmtlogdetail::UnrefPtr<fmt::remove_cvref_t<Args>>::type...> format,
Args&&... args) noexcept {
if (!logId) {
auto unnamed_format = unNameFormat<false>(fmt::string_view(format), nullptr, args...);
registerLogInfo(logId, formatTo<Args...>, location, level, unnamed_format);
}
constexpr size_t num_cstring = fmt::detail::count<isCstring<Args>()...>();
size_t cstringSizes[std::max(num_cstring, (size_t)1)];
uint32_t alloc_size = 8 + (uint32_t)getArgSizes<0>(cstringSizes, args...);
bool q_full_cb = true;
do {
if (auto header = allocMsg(alloc_size, q_full_cb)) {
header->logId = logId;
char* out = (char*)(header + 1);
*(int64_t*)out = tsc;
out += 8;
encodeArgs<0>(cstringSizes, out, std::forward<Args>(args)...);
header->push(alloc_size);
break;
}
q_full_cb = false;
} while (FMTLOG_BLOCK);
}
template<typename... Args>
inline void logOnce(const char* location, LogLevel level, fmt::format_string<Args...> format,
Args&&... args) {
fmt::string_view sv(format);
auto&& fmt_args = fmt::make_format_args(args...);
uint32_t fmt_size = formatted_size(sv, fmt_args);
uint32_t alloc_size = 8 + 8 + fmt_size;
bool q_full_cb = true;
do {
if (auto header = allocMsg(alloc_size, q_full_cb)) {
header->logId = (uint32_t)level;
char* out = (char*)(header + 1);
*(int64_t*)out = tscns.rdtsc();
out += 8;
*(const char**)out = location;
out += 8;
vformat_to(out, sv, fmt_args);
header->push(alloc_size);
break;
}
q_full_cb = false;
} while (FMTLOG_BLOCK);
}
};
using fmtlog = fmtlogT<>;
template<int _>
FAST_THREAD_LOCAL typename fmtlogT<_>::ThreadBuffer* fmtlogT<_>::threadBuffer;
template<int __ = 0>
struct fmtlogWrapper
{ static fmtlog impl; };
template<int _>
fmtlog fmtlogWrapper<_>::impl;
template<int _>
inline void fmtlogT<_>::setLogLevel(LogLevel logLevel) noexcept {
fmtlogWrapper<>::impl.currentLogLevel = logLevel;
}
template<int _>
inline typename fmtlogT<_>::LogLevel fmtlogT<_>::getLogLevel() noexcept {
return fmtlogWrapper<>::impl.currentLogLevel;
}
template<int _>
inline bool fmtlogT<_>::checkLogLevel(LogLevel logLevel) noexcept {
#ifdef FMTLOG_NO_CHECK_LEVEL
return true;
#else
return logLevel >= fmtlogWrapper<>::impl.currentLogLevel;
#endif
}
#define __FMTLOG_S1(x) #x
#define __FMTLOG_S2(x) __FMTLOG_S1(x)
#define __FMTLOG_LOCATION __FILE__ ":" __FMTLOG_S2(__LINE__)
#define FMTLOG(level, format, ...) \
do { \
static uint32_t logId = 0; \
if (!fmtlog::checkLogLevel(level)) break; \
fmtlogWrapper<>::impl.log(logId, fmtlogWrapper<>::impl.tscns.rdtsc(), __FMTLOG_LOCATION, \
level, format, ##__VA_ARGS__); \
} while (0)
#define FMTLOG_LIMIT(min_interval, level, format, ...) \
do { \
static uint32_t logId = 0; \
static int64_t limitNs = 0; \
if (!fmtlog::checkLogLevel(level)) break; \
int64_t tsc = fmtlogWrapper<>::impl.tscns.rdtsc(); \
int64_t ns = fmtlogWrapper<>::impl.tscns.tsc2ns(tsc); \
if (ns < limitNs) break; \
limitNs = ns + min_interval; \
fmtlogWrapper<>::impl.log(logId, tsc, __FMTLOG_LOCATION, level, format, ##__VA_ARGS__); \
} while (0)
#define FMTLOG_ONCE(level, format, ...) \
do { \
if (!fmtlog::checkLogLevel(level)) break; \
fmtlogWrapper<>::impl.logOnce(__FMTLOG_LOCATION, level, format, ##__VA_ARGS__); \
} while (0)
#if FMTLOG_ACTIVE_LEVEL <= FMTLOG_LEVEL_DBG
#define logd(format, ...) FMTLOG(fmtlog::DBG, format, ##__VA_ARGS__)
#define logdo(format, ...) FMTLOG_ONCE(fmtlog::DBG, format, ##__VA_ARGS__)
#define logdl(min_interval, format, ...) FMTLOG_LIMIT(min_interval, fmtlog::DBG, format, ##__VA_ARGS__)
#else
#define logd(format, ...) (void)0
#define logdo(format, ...) (void)0
#define logdl(min_interval, format, ...) (void)0
#endif
#if FMTLOG_ACTIVE_LEVEL <= FMTLOG_LEVEL_INF
#define logi(format, ...) FMTLOG(fmtlog::INF, format, ##__VA_ARGS__)
#define logio(format, ...) FMTLOG_ONCE(fmtlog::INF, format, ##__VA_ARGS__)
#define logil(min_interval, format, ...) FMTLOG_LIMIT(min_interval, fmtlog::INF, format, ##__VA_ARGS__)
#else
#define logi(format, ...) (void)0
#define logio(format, ...) (void)0
#define logil(min_interval, format, ...) (void)0
#endif
#if FMTLOG_ACTIVE_LEVEL <= FMTLOG_LEVEL_WRN
#define logw(format, ...) FMTLOG(fmtlog::WRN, format, ##__VA_ARGS__)
#define logwo(format, ...) FMTLOG_ONCE(fmtlog::WRN, format, ##__VA_ARGS__)
#define logwl(min_interval, format, ...) FMTLOG_LIMIT(min_interval, fmtlog::WRN, format, ##__VA_ARGS__)
#else
#define logw(format, ...) (void)0
#define logwo(format, ...) (void)0
#define logwl(min_interval, format, ...) (void)0
#endif
#if FMTLOG_ACTIVE_LEVEL <= FMTLOG_LEVEL_ERR
#define loge(format, ...) FMTLOG(fmtlog::ERR, format, ##__VA_ARGS__)
#define logeo(format, ...) FMTLOG_ONCE(fmtlog::ERR, format, ##__VA_ARGS__)
#define logel(min_interval, format, ...) FMTLOG_LIMIT(min_interval, fmtlog::ERR, format, ##__VA_ARGS__)
#else
#define loge(format, ...) (void)0
#define logeo(format, ...) (void)0
#define logel(min_interval, format, ...) (void)0
#endif
#ifdef FMTLOG_HEADER_ONLY
#include "fmtlog-inl.h"
#endif
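For context, a minimal usage sketch assembled only from the API declared in this header; the log file path and messages are illustrative:

#include "fmtlog.h"

int main() {
  fmtlog::setLogFile("/tmp/app.log");   // truncate defaults to false
  fmtlog::setLogLevel(fmtlog::DBG);     // let every level pass checkLogLevel()
  fmtlog::startPollingThread();         // background thread keeps calling poll()
  logi("hello {}", "world");            // expands to FMTLOG(fmtlog::INF, ...)
  loge("request failed with code {}", 42);
  fmtlog::stopPollingThread();
  fmtlog::poll(true);                   // final flush of anything still queued
}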

View file

@ -1,15 +0,0 @@
project('fmtlog', 'cpp', version: '2.2.2')
incdir = include_directories(
'include',
is_system: true # Ignores warnings from include dir
)
libdir = meson.current_source_dir()
fmtlog_dep = declare_dependency(
include_directories: incdir,
link_args: [libdir + '/libfmtlog-static.a']
)
install_headers('include/fmtlog.h', subdir: 'fmtlog')

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,45 @@
AccessModifierOffset: -2
AlignConsecutiveDeclarations: None
AlignOperands: false
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: Never
AllowShortIfStatementsOnASingleLine: Never
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackParameters: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Allman
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: BeforeColon
ColumnLimit: 100
CommentPragmas: ''
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
IndentCaseLabels: false
IndentPPDirectives: BeforeHash
IndentWidth: 2
IndentWrappedFunctionNames: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakString: 1000
PenaltyBreakFirstLessLess: 120
PenaltyExcessCharacter: 1
PenaltyReturnTypeOnItsOwnLine: 1000
PointerAlignment: Left
SpaceBeforeAssignmentOperators: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
Cpp11BracedListStyle: true
Standard: c++20
TabWidth: 2
UseTab: Never

View file

@ -0,0 +1,5 @@
ignore:
- "examples"
- "quill/include/quill/bundled"
- "quill/src/bundled"
- "quill/test"

View file

@ -0,0 +1,33 @@
name: coverage
on:
push:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
jobs:
build:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- name: Configure
run: cmake -Bbuild -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_STANDARD=17 -DQUILL_BUILD_TESTS=ON -DQUILL_CODE_COVERAGE=ON -DQUILL_BUILD_EXAMPLES=ON -DQUILL_VERBOSE_MAKEFILE=ON
- name: Build
run: cmake --build build -j4
- name: Test
run: |
cd build
ctest --build-config Debug
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true

View file

@ -0,0 +1,111 @@
name: linux
on:
push:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
pull_request:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
cxx: [ g++-8, g++-10 ]
build_type: [ Debug, Release ]
std: [ 17 ]
os: [ ubuntu-20.04 ]
with_tests: [ ON ]
include:
# Build and test with g++-8
- cxx: g++-8
std: 17
os: ubuntu-20.04
with_tests: ON
install: sudo apt -o Acquire::Retries=5 install g++-8
# Build and test as shared library
- cxx: g++-10
build_type: Release
std: 17
os: ubuntu-20.04
with_tests: ON
cmake_options: -DBUILD_SHARED_LIBS=ON -DCMAKE_CXX_VISIBILITY_PRESET=hidden -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
# Builds with no exceptions
- cxx: g++-10
build_type: Release
std: 17
os: ubuntu-20.04
with_tests: OFF
cmake_options: -DQUILL_NO_EXCEPTIONS=ON
# Build and test with valgrind, sanitizers
- cxx: g++-10
build_type: Release
std: 20
os: ubuntu-20.04
with_tests: ON
cmake_options: -DQUILL_USE_VALGRIND=ON
ctest_options: -T memcheck
install: sudo apt -o Acquire::Retries=5 install valgrind
# Build and test sanitizers
- cxx: clang++-12
build_type: Release
std: 20
os: ubuntu-20.04
with_tests: ON
cmake_options: -DQUILL_SANITIZE_ADDRESS=ON
# Build and test sanitizers
- cxx: clang++-12
build_type: Release
std: 20
os: ubuntu-20.04
with_tests: ON
cmake_options: -DQUILL_SANITIZE_THREAD=ON
steps:
- uses: actions/checkout@v4
- name: Create Build Environment
run: |
sudo apt-get update
${{matrix.install}}
cmake -E make_directory ${{runner.workspace}}/build
- name: Configure
working-directory: ${{runner.workspace}}/build
env:
CXX: ${{matrix.cxx}}
CXXFLAGS: ${{matrix.cxxflags}}
run: |
cmake -DCMAKE_BUILD_TYPE=${{matrix.build_type}} ${{matrix.cmake_options}} \
-DCMAKE_CXX_STANDARD=${{matrix.std}} -DQUILL_BUILD_TESTS=${{matrix.with_tests}} \
-DQUILL_BUILD_EXAMPLES=ON -DQUILL_VERBOSE_MAKEFILE=ON $GITHUB_WORKSPACE
- name: Build
working-directory: ${{runner.workspace}}/build
run: |
threads=`nproc`
cmake --build . --config ${{matrix.build_type}} --parallel $threads
- name: Test
working-directory: ${{runner.workspace}}/build
run: |
threads=`nproc`
ctest --build-config ${{matrix.build_type}} ${{matrix.ctest_options}} --parallel $threads --output-on-failure
env:
CTEST_OUTPUT_ON_FAILURE: True

View file

@ -0,0 +1,50 @@
name: macos
on:
push:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
pull_request:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ macos-14 ]
build_type: [ Debug, Release ]
std: [ 17, 20 ]
steps:
- uses: actions/checkout@v4
- name: Create Build Environment
run: cmake -E make_directory ${{runner.workspace}}/build
- name: Configure
working-directory: ${{runner.workspace}}/build
run: |
cmake -DCMAKE_BUILD_TYPE=${{matrix.build_type}} \
-DCMAKE_CXX_STANDARD=${{matrix.std}} -DQUILL_BUILD_TESTS=ON \
-DQUILL_BUILD_EXAMPLES=ON -DQUILL_VERBOSE_MAKEFILE=ON $GITHUB_WORKSPACE
- name: Build
working-directory: ${{runner.workspace}}/build
run: |
threads=`sysctl -n hw.logicalcpu`
cmake --build . --config ${{matrix.build_type}} --parallel $threads
- name: Test
working-directory: ${{runner.workspace}}/build
run: |
threads=`sysctl -n hw.logicalcpu`
ctest --build-config ${{matrix.build_type}} --parallel $threads --output-on-failure

View file

@ -0,0 +1,78 @@
name: windows
on:
push:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
pull_request:
branches:
- master
paths-ignore:
- '**.md'
- 'docs/**'
jobs:
build:
# windows-2019 and windows-2022 have MSVC 2019 and 2022 installed
# respectively: https://github.com/actions/virtual-environments.
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ windows-2019, windows-2022 ]
platform: [ x64 ]
build_type: [ Debug, Release ]
std: [ 17, 20 ]
with_tests: [ ON ]
include:
# Builds with no exceptions
- os: windows-2019
platform: x64
build_type: Release
std: 17
with_tests: "OFF"
cmake_options: -DQUILL_NO_EXCEPTIONS=ON
# Builds for ARM
- os: windows-2019
platform: ARM64
build_type: Release
std: 17
with_tests: "OFF"
- os: windows-2019
platform: ARM
build_type: Release
std: 17
with_tests: "OFF"
steps:
- uses: actions/checkout@v4
- name: Create Build Environment
run: cmake -E make_directory ${{runner.workspace}}/build
- name: Configure
shell: bash # Use a bash shell for $GITHUB_WORKSPACE.
working-directory: ${{runner.workspace}}/build
run: |
cmake -DCMAKE_BUILD_TYPE=${{matrix.build_type}} ${{matrix.cmake_options}} \
-A ${{matrix.platform}} -DCMAKE_CXX_STANDARD=${{matrix.std}} -DQUILL_BUILD_TESTS=${{matrix.with_tests}} \
-DQUILL_BUILD_EXAMPLES=ON -DQUILL_VERBOSE_MAKEFILE=ON $GITHUB_WORKSPACE
- name: Build
working-directory: ${{runner.workspace}}/build
run: |
$threads = (Get-CimInstance Win32_ComputerSystem).NumberOfLogicalProcessors
cmake --build . --config ${{matrix.build_type}} --parallel $threads
- name: Test
working-directory: ${{runner.workspace}}/build
run: |
$threads = (Get-CimInstance Win32_ComputerSystem).NumberOfLogicalProcessors
ctest --build-config ${{matrix.build_type}} --parallel $threads --output-on-failure

8
subprojects/quill-4.2.0/.gitignore vendored Normal file
View file

@ -0,0 +1,8 @@
# Project exclude paths
/cmake-build-debug/
/.vs
/out/build/x64-Debug
/CMakeSettings.json
/out/build/x64-Release
/out/build/Mingw64-Debug
/out/build/x64-Debug-2

View file

@ -0,0 +1 @@
768ccd8048e2f53838c17ea6480236bb8d89fa785fb08f378539bfb0aa61ac1f

View file

@ -0,0 +1,22 @@
# .readthedocs.yaml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Set the version of Python and other tools you might need
build:
os: ubuntu-22.04
tools:
python: "3.11"
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: docs/conf.py
# We recommend specifying your dependencies to enable reproducible builds:
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: docs/requirements.txt

File diff suppressed because it is too large.

View file

@ -0,0 +1,166 @@
cmake_minimum_required(VERSION 3.8)
project(quill)
#-------------------------------------------------------------------------------------------------------
# Options
#-------------------------------------------------------------------------------------------------------
# Builds Quill without exceptions by adding the -fno-exceptions flag to the compiler.
option(QUILL_NO_EXCEPTIONS "Build without exceptions using -fno-exceptions flag" OFF)
# Disables features that rely on retrieving the thread name, which is not supported in older versions of Windows (e.g., Windows Server 2012/2016).
# Enabling this option ensures compatibility with older Windows versions.
option(QUILL_NO_THREAD_NAME_SUPPORT "Disable features not supported on Windows Server 2012/2016" OFF)
# Enables the use of _mm_prefetch, _mm_clflush, and _mm_clflushopt instructions to enhance cache coherence performance on x86 architectures.
# When enabled, Quill will utilize these instructions on the frontend's queue operations.
# Ensure to specify the target architecture with -march="..." when compiling to maximize compatibility and performance.
option(QUILL_X86ARCH "Enable optimizations for cache coherence on x86 architectures using specific CPU instructions" OFF)
# When enabled, removes the non-prefixed `LOG_*` macros, leaving only `QUILL_LOG_*` macros available.
# This is useful in scenarios where the original macro names conflict with those of an existing logging library.
option(QUILL_DISABLE_NON_PREFIXED_MACROS "Disable non-prefixed macros" OFF)
option(QUILL_DISABLE_POSITION_INDEPENDENT_CODE "Disable position-independent code" OFF)
option(QUILL_BUILD_EXAMPLES "Build the examples" OFF)
option(QUILL_BUILD_TESTS "Build the tests (Requires https://github.com/google/googletest to be installed)" OFF)
option(QUILL_BUILD_BENCHMARKS "Build the benchmarks" OFF)
option(QUILL_SANITIZE_ADDRESS "Enable address sanitizer in tests" OFF)
option(QUILL_SANITIZE_THREAD "Enable thread sanitizer in tests (Using this option with any other compiler except clang may result in false positives)" OFF)
option(QUILL_CODE_COVERAGE "Enable code coverage" OFF)
option(QUILL_USE_VALGRIND "Use valgrind as the default memcheck tool in CTest (Requires Valgrind)" OFF)
option(QUILL_ENABLE_INSTALL "Enable CMake Install when Quill is not a master project" OFF)
option(QUILL_DOCS_GEN "Generate documentation" OFF)
#-------------------------------------------------------------------------------------------------------
# Use newer policies if possible, up to most recent tested version of CMake.
#-------------------------------------------------------------------------------------------------------
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
#-------------------------------------------------------------------------------------------------------
# Include common compiler options
#-------------------------------------------------------------------------------------------------------
include(${PROJECT_SOURCE_DIR}/cmake/SetCommonCompileOptions.cmake)
#-------------------------------------------------------------------------------------------------------
# Determine if quill is built as a subproject (using add_subdirectory) or if it is the master project.
#-------------------------------------------------------------------------------------------------------
set(QUILL_MASTER_PROJECT FALSE CACHE BOOL "Master Project" FORCE)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(QUILL_MASTER_PROJECT TRUE CACHE BOOL "Master Project" FORCE)
endif ()
#-------------------------------------------------------------------------------------------------------
# Custom cmake functions
#-------------------------------------------------------------------------------------------------------
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/quill/cmake")
include(Utility)
#-------------------------------------------------------------------------------------------------------
# Resolve version
#-------------------------------------------------------------------------------------------------------
quill_extract_version()
project(quill VERSION ${QUILL_VERSION} LANGUAGES CXX)
#-------------------------------------------------------------------------------------------------------
# Set default build to release
#-------------------------------------------------------------------------------------------------------
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build" FORCE)
endif ()
#---------------------------------------------------------------------------------------
# Compiler config
#---------------------------------------------------------------------------------------
if (NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif ()
#-------------------------------------------------------------------------------------------------------
# Required Packages
#-------------------------------------------------------------------------------------------------------
find_package(Threads REQUIRED)
if (QUILL_BUILD_TESTS)
enable_testing()
if (QUILL_USE_VALGRIND)
# find valgrind
find_program(MEMORYCHECK_COMMAND NAMES valgrind)
if (NOT MEMORYCHECK_COMMAND)
message(WARNING "Valgrind not found")
endif ()
# set valgrind params
set(MEMORYCHECK_COMMAND_OPTIONS "--tool=memcheck --leak-check=full --leak-resolution=med --show-leak-kinds=all --track-origins=yes --vgdb=no --fair-sched=yes")
# add memcheck test action to ctest
include(CTest)
endif ()
endif ()
#-------------------------------------------------------------------------------------------------------
# Log Info
#-------------------------------------------------------------------------------------------------------
if (QUILL_MASTER_PROJECT)
option(QUILL_VERBOSE_MAKEFILE "Verbose make output" OFF)
message(STATUS "CMAKE_BUILD_TYPE: " ${CMAKE_BUILD_TYPE})
message(STATUS "QUILL_VERSION: ${QUILL_VERSION}")
endif ()
message(STATUS "QUILL_NO_EXCEPTIONS: " ${QUILL_NO_EXCEPTIONS})
message(STATUS "QUILL_NO_THREAD_NAME_SUPPORT: " ${QUILL_NO_THREAD_NAME_SUPPORT})
message(STATUS "QUILL_X86ARCH: " ${QUILL_X86ARCH})
message(STATUS "QUILL_DISABLE_NON_PREFIXED_MACROS: " ${QUILL_DISABLE_NON_PREFIXED_MACROS})
#---------------------------------------------------------------------------------------
# Verbose make file option
#---------------------------------------------------------------------------------------
if (QUILL_VERBOSE_MAKEFILE)
set(CMAKE_VERBOSE_MAKEFILE TRUE CACHE BOOL "Verbose output" FORCE)
endif ()
# address sanitizer flags
if (QUILL_SANITIZE_ADDRESS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address,undefined -fno-omit-frame-pointer -g")
endif ()
# thread sanitizer flags
if (QUILL_SANITIZE_THREAD)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread")
endif ()
# Append extra options for coverage
if (QUILL_CODE_COVERAGE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fprofile-arcs -ftest-coverage")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage")
endif ()
# Build Examples
if (QUILL_BUILD_EXAMPLES)
add_subdirectory(examples)
endif ()
if (QUILL_BUILD_BENCHMARKS)
add_subdirectory(benchmarks)
endif ()
add_subdirectory(quill)
if (QUILL_DOCS_GEN)
# Add the cmake folder so the FindSphinx module is found
set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
add_subdirectory(docs)
endif ()

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 - present, Odysseas Georgoudis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,19 @@
Copyright (c) 2021 The Meson development team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,511 @@
<div align="center">
<a href="https://github.com/odygrd/quill">
<img width="125" src="https://i.postimg.cc/DZrH8HkX/quill-circle-photos-v2-x2-colored-toned.png" alt="Quill logo">
</a>
<h1>Quill</h1>
<div>
<a href="https://github.com/odygrd/quill/actions?query=workflow%3Alinux">
<img src="https://img.shields.io/github/actions/workflow/status/odygrd/quill/linux.yml?branch=master&label=linux&logo=linux&style=flat-square" alt="linux-ci" />
</a>
<a href="https://github.com/odygrd/quill/actions?query=workflow%3Amacos">
<img src="https://img.shields.io/github/actions/workflow/status/odygrd/quill/macos.yml?branch=master&label=macos&logo=apple&logoColor=white&style=flat-square" alt="macos-ci" />
</a>
<a href="https://github.com/odygrd/quill/actions?query=workflow%3Awindows">
<img src="https://img.shields.io/github/actions/workflow/status/odygrd/quill/windows.yml?branch=master&label=windows&logo=windows&logoColor=blue&style=flat-square" alt="windows-ci" />
</a>
</div>
<div>
<a href="https://codecov.io/gh/odygrd/quill">
<img src="https://img.shields.io/codecov/c/gh/odygrd/quill/master.svg?logo=codecov&style=flat-square" alt="Codecov" />
</a>
<a href="https://app.codacy.com/gh/odygrd/quill/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade">
<img src="https://img.shields.io/codacy/grade/cd387bc34658475d98bff84db3ad5287?logo=codacy&style=flat-square" alt="Codacy" />
</a>
<a href="https://www.codefactor.io/repository/github/odygrd/quill">
<img src="https://img.shields.io/codefactor/grade/github/odygrd/quill?logo=codefactor&style=flat-square" alt="CodeFactor" />
</a>
</div>
<div>
<a href="https://opensource.org/licenses/MIT">
<img src="https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square" alt="license" />
</a>
<a href="https://en.wikipedia.org/wiki/C%2B%2B17">
<img src="https://img.shields.io/badge/language-C%2B%2B17-red.svg?style=flat-square" alt="language" />
</a>
</div>
<p><b>Asynchronous Low Latency C++ Logging Library</b></p>
</div>
<br>
- [Introduction](#introduction)
- [Documentation](#documentation)
- [Features](#features)
- [Caveats](#caveats)
- [Performance](#performance)
- [Quick Start](#quick-start)
- [CMake Integration](#cmake-integration)
- [Design](#design)
- [License](#license)
| homebrew | vcpkg | conan |
|:--------------------:|:---------------------:|:-----------------:|
| `brew install quill` | `vcpkg install quill` | `quill/[>=1.2.3]` |
## Introduction
Quill is a high-performance, cross-platform logging library designed for C++17 and onwards.
Quill is a production-ready logging library that has undergone extensive unit testing. It has been successfully utilized
in production environments, including financial trading applications, providing high-performance and reliable logging
capabilities.
## Documentation
For detailed documentation and usage instructions, please visit
the [Quill Documentation on Read the Docs](http://quillcpp.readthedocs.io/). It provides comprehensive information on
how to integrate and utilize Quill in your C++ applications.
Additionally, you can explore the [examples](http://github.com/odygrd/quill/tree/master/examples) folder in the Quill
repository on GitHub. These examples serve as valuable resources to understand different usage scenarios and demonstrate
the capabilities of the library.
## Features
- **Low Latency Logging**: Achieve fast logging performance with low latency. Refer to
the [Benchmarks](http://github.com/odygrd/quill#performance) for more details.
- **Asynchronous logging**: Log arguments and messages are formatted in a backend logging thread, effectively offloading
the formatting overhead from the critical path.
- **Custom Formatters**: Customize log formatting based on user-defined patterns.
Explore [Formatters](http://quillcpp.readthedocs.io/en/latest/tutorial.html#formatters) for further details.
- **Flexible Timestamp Generation**: Choose between rdtsc, chrono, or custom clocks (useful for simulations) for
log message timestamp generation.
- **Log Stack Traces**: Store log messages in a ring buffer and display them later in response to a higher severity log
statement or on demand. Refer
to [Backtrace Logging](http://quillcpp.readthedocs.io/en/latest/tutorial.html#backtrace-logging) for more information.
- **Multiple Logging Sinks**: Utilize various logging targets, including:
- Console logging with color support.
- File logging.
- Rotating log files based on time or size.
- JSON logging.
- Custom sinks.
- **Log Message Filtering**: Apply filters to selectively process log messages. Learn more
about [Filters](http://quillcpp.readthedocs.io/en/latest/tutorial.html#filters).
- **Structured Logging**: Generate JSON structured logs.
See [Structured-Log](http://quillcpp.readthedocs.io/en/latest/tutorial.html#json-log) for details.
- **Blocking or Dropping Message Modes**: Choose between `blocking` or `dropping` message modes in the library.
In `blocking` mode, the hot threads pause and wait when the queue is full until space becomes available, ensuring no
message loss but introducing potential latency. In `dropping` mode, log messages beyond the queue's capacity may be
dropped to prioritize low latency. The library provides reports on dropped messages, queue reallocations, and blocked
hot threads for monitoring purposes.
- **Queue Types**: The library supports different types of queues for transferring logs from the hot path to the backend
thread: bounded queues with a fixed capacity and unbounded queues that start small and can dynamically grow. A minimal
configuration sketch follows this list.
- **Wide Character Support**: Wide strings compatible with ASCII encoding are supported, applicable to Windows only.
Additionally, there is support for logging STL containers consisting of wide strings. Note that chaining STL types,
such as `std::vector<std::vector<std::wstring>>`, is not supported for wide strings.
- **Ordered Log Statements**: Log statements are ordered by timestamp even when produced by different threads,
facilitating easier debugging of multithreaded applications.
- **Compile-Time Log Level Stripping**: Completely strip out log levels at compile time, reducing `if` branches.
- **Clean and Warning-Free Codebase**: Ensure a clean and warning-free codebase, even with high compiler warning levels.
- **Crash-Safe Behavior**: Benefit from crash-safe behavior with a built-in signal handler.
- **Type-Safe API**: Type-safe API using the excellent [{fmt}](http://github.com/fmtlib/fmt) library.
- **Huge Pages**: Benefit from support for huge pages on the hot path. This feature allows for improved performance and
efficiency.
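As a rough illustration of the queue-related options above, the sketch below mirrors the `FrontendOptions` pattern used
by the hot-path benchmarks bundled in this repository. Treat it as a minimal sketch: the exact member names and
`QueueType` values should be checked against the headers of the Quill version you build.
```c++
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"

#include <cstdint>
#include <utility>

// Custom frontend options, copied from the benchmark sources in this repository.
// Swapping queue_type (e.g. to a bounded/dropping variant) selects the
// blocking-vs-dropping trade-off described in the feature list above.
struct FrontendOptions
{
  static constexpr quill::QueueType queue_type = quill::QueueType::UnboundedBlocking;
  static constexpr uint32_t initial_queue_capacity = 131'072;
  static constexpr uint32_t blocking_queue_retry_interval_ns = 800;
  static constexpr bool huge_pages_enabled = false;
};

using Frontend = quill::FrontendImpl<FrontendOptions>;
using Logger   = quill::LoggerImpl<FrontendOptions>;

int main()
{
  quill::Backend::start();

  auto console_sink = Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
  Logger* logger    = Frontend::create_or_get_logger("root", std::move(console_sink));

  LOG_INFO(logger, "Configured queue capacity: {}", FrontendOptions::initial_queue_capacity);
}
```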
## Caveats
Quill may not work well with `fork()` since it spawns a background thread and `fork()` doesn't work well
with multithreading.
If your application uses `fork()` and you want to log in the child processes as well, you should call
`quill::start()` after the `fork()` call. Additionally, you should ensure that you write to different
files in the parent and child processes to avoid conflicts.
For example :
```c++
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"
int main()
{
// DO NOT CALL THIS BEFORE FORK
// quill::Backend::start();
if (fork() == 0)
{
quill::Backend::start();
// Get or create a handler to the file - Write to a different file
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"child.log");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(file_sink));
QUILL_LOG_INFO(logger, "Hello from Child {}", 123);
}
else
{
quill::Backend::start();
// Get or create a handler to the file - Write to a different file
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"parent.log");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(file_sink));
QUILL_LOG_INFO(logger, "Hello from Parent {}", 123);
}
}
```
## Performance
### Latency
The results presented in the tables below are measured in `nanoseconds (ns)`.
#### Logging Numbers
The following message is logged 100'000 times for each thread:
`LOG_INFO(logger, "Logging int: {}, int: {}, double: {}", i, j, d)`.
##### 1 Thread Logging
| Library | 50th | 75th | 90th | 95th | 99th | 99.9th |
|---------------------------------------------------------------------|:----:|:----:|:----:|:----:|:----:|:------:|
| [Quill v4.1 Bounded Dropping Queue](http://github.com/odygrd/quill) | 7 | 7 | 8 | 8 | 9 | 10 |
| [fmtlog](http://github.com/MengRao/fmtlog) | 8 | 8 | 9 | 9 | 10 | 13 |
| [Quill v4.1 Unbounded Queue](http://github.com/odygrd/quill) | 8 | 8 | 9 | 9 | 10 | 13 |
| [Quill v3.8 Unbounded Queue](http://github.com/odygrd/quill) | 8 | 8 | 9 | 9 | 10 | 13 |
| [PlatformLab NanoLog](http://github.com/PlatformLab/NanoLog) | 11 | 12 | 13 | 14 | 15 | 20 |
| [MS BinLog](http://github.com/Morgan-Stanley/binlog) | 21 | 21 | 22 | 24 | 28 | 57 |
| [Reckless](http://github.com/mattiasflodin/reckless) | 41 | 45 | 47 | 48 | 49 | 69 |
| [Iyengar NanoLog](http://github.com/Iyengar111/NanoLog) | 51 | 54 | 63 | 81 | 113 | 160 |
| [spdlog](http://github.com/gabime/spdlog) | 148 | 153 | 159 | 163 | 169 | 176 |
| [g3log](http://github.com/KjellKod/g3log) | 1192 | 1282 | 1363 | 1440 | 1624 | 1802 |
##### 4 Threads Logging Simultaneously
| Library | 50th | 75th | 90th | 95th | 99th | 99.9th |
|---------------------------------------------------------------------|:----:|:----:|:----:|:----:|:----:|:------:|
| [Quill v4.1 Bounded Dropping Queue](http://github.com/odygrd/quill) | 7 | 8 | 9 | 9 | 10 | 13 |
| [fmtlog](http://github.com/MengRao/fmtlog) | 8 | 8 | 9 | 9 | 11 | 13 |
| [Quill v3.8 Unbounded Queue](http://github.com/odygrd/quill) | 8 | 9 | 10 | 10 | 11 | 13 |
| [Quill v4.1 Unbounded Queue](http://github.com/odygrd/quill) | 9 | 9 | 10 | 11 | 12 | 15 |
| [PlatformLab NanoLog](http://github.com/PlatformLab/NanoLog) | 12 | 13 | 13 | 14 | 15 | 19 |
| [MS BinLog](http://github.com/Morgan-Stanley/binlog) | 21 | 21 | 22 | 22 | 29 | 62 |
| [Reckless](http://github.com/mattiasflodin/reckless) | 42 | 46 | 47 | 48 | 54 | 78 |
| [Iyengar NanoLog](http://github.com/Iyengar111/NanoLog) | 53 | 62 | 93 | 122 | 150 | 216 |
| [spdlog](http://github.com/gabime/spdlog) | 209 | 236 | 276 | 304 | 409 | 700 |
| [g3log](http://github.com/KjellKod/g3log) | 1344 | 1415 | 1489 | 1557 | 1815 | 5855 |
#### Logging Large Strings
The following message is logged 100'000 times for each thread:
`LOG_INFO(logger, "Logging int: {}, int: {}, string: {}", i, j, large_string)`.
The large string used in the log message is over 35 characters to prevent the short string optimization
of `std::string`.
##### 1 Thread Logging
| Library | 50th | 75th | 90th | 95th | 99th | 99.9th |
|---------------------------------------------------------------------|:----:|:----:|:----:|:----:|:----:|:------:|
| [Quill v3.8 Unbounded Queue](http://github.com/odygrd/quill) | 10 | 12 | 13 | 13 | 14 | 16 |
| [Quill v4.1 Bounded Dropping Queue](http://github.com/odygrd/quill) | 11 | 12 | 13 | 14 | 15 | 17 |
| [fmtlog](http://github.com/MengRao/fmtlog) | 11 | 12 | 13 | 14 | 15 | 17 |
| [PlatformLab NanoLog](http://github.com/PlatformLab/NanoLog) | 13 | 14 | 15 | 15 | 17 | 19 |
| [Quill v4.1 Unbounded Queue](http://github.com/odygrd/quill) | 13 | 14 | 16 | 16 | 17 | 21 |
| [MS BinLog](http://github.com/Morgan-Stanley/binlog) | 22 | 23 | 23 | 25 | 30 | 59 |
| [Iyengar NanoLog](http://github.com/Iyengar111/NanoLog) | 52 | 55 | 64 | 83 | 114 | 160 |
| [Reckless](http://github.com/mattiasflodin/reckless) | 102 | 122 | 134 | 137 | 143 | 153 |
| [spdlog](http://github.com/gabime/spdlog) | 120 | 123 | 127 | 130 | 138 | 145 |
| [g3log](http://github.com/KjellKod/g3log) | 955 | 1049 | 1129 | 1190 | 1351 | 1539 |
##### 4 Threads Logging Simultaneously
| Library | 50th | 75th | 90th | 95th | 99th | 99.9th |
|---------------------------------------------------------------------|:----:|:----:|:----:|:----:|:----:|:------:|
| [Quill v4.1 Bounded Dropping Queue](http://github.com/odygrd/quill) | 11 | 12 | 13 | 15 | 16 | 18 |
| [fmtlog](http://github.com/MengRao/fmtlog) | 11 | 12 | 13 | 15 | 16 | 18 |
| [Quill v3.8 Unbounded Queue](http://github.com/odygrd/quill) | 12 | 13 | 14 | 15 | 16 | 19 |
| [PlatformLab NanoLog](http://github.com/PlatformLab/NanoLog) | 13 | 15 | 15 | 16 | 17 | 20 |
| [Quill v4.1 Unbounded Queue](http://github.com/odygrd/quill) | 14 | 16 | 17 | 18 | 19 | 22 |
| [MS BinLog](http://github.com/Morgan-Stanley/binlog) | 23 | 24 | 24 | 25 | 31 | 62 |
| [Iyengar NanoLog](http://github.com/Iyengar111/NanoLog) | 53 | 60 | 92 | 121 | 149 | 212 |
| [Reckless](http://github.com/mattiasflodin/reckless) | 101 | 121 | 133 | 136 | 143 | 160 |
| [spdlog](http://github.com/gabime/spdlog) | 186 | 215 | 266 | 297 | 381 | 641 |
| [g3log](http://github.com/KjellKod/g3log) | 1089 | 1164 | 1252 | 1328 | 1578 | 5563 |
#### Logging Complex Types
The following message is logged 100'000 times for each thread:
`LOG_INFO(logger, "Logging int: {}, int: {}, vector: {}", i, j, v)`.
Logging `std::vector<std::string> v` containing 16 large strings, each ranging from 50 to 60 characters.
The strings used in the log message are over 35 characters to prevent the short string optimization of `std::string`.
##### 1 Thread Logging
| Library | 50th | 75th | 90th | 95th | 99th | 99.9th |
|---------------------------------------------------------------------|:----:|:----:|:----:|:----:|:----:|:------:|
| [Quill v4.1 Unbounded Queue](http://github.com/odygrd/quill) | 52 | 54 | 56 | 58 | 60 | 63 |
| [Quill v4.1 Bounded Dropping Queue](http://github.com/odygrd/quill) | 53 | 55 | 57 | 59 | 62 | 103 |
| [MS BinLog](http://github.com/Morgan-Stanley/binlog) | 66 | 70 | 79 | 81 | 84 | 91 |
| [Quill v3.8 Unbounded Queue](http://github.com/odygrd/quill) | 632 | 651 | 676 | 698 | 737 | 1049 |
| [fmtlog](http://github.com/MengRao/fmtlog) | 724 | 752 | 776 | 789 | 814 | 857 |
| [spdlog](http://github.com/gabime/spdlog) | 6242 | 6331 | 6438 | 6523 | 6782 | 7341 |
##### 4 Threads Logging Simultaneously
| Library | 50th | 75th | 90th | 95th | 99th | 99.9th |
|---------------------------------------------------------------------|:----:|:----:|:----:|:----:|:----:|:------:|
| [Quill v4.1 Bounded Dropping Queue](http://github.com/odygrd/quill) | 55 | 57 | 59 | 61 | 64 | 77 |
| [MS BinLog](http://github.com/Morgan-Stanley/binlog) | 70 | 74 | 83 | 85 | 88 | 102 |
| [Quill v4.1 Unbounded Queue](http://github.com/odygrd/quill) | 92 | 100 | 110 | 118 | 135 | 157 |
| [Quill v3.8 Unbounded Queue](http://github.com/odygrd/quill) | 674 | 694 | 736 | 762 | 805 | 884 |
| [fmtlog](http://github.com/MengRao/fmtlog) | 789 | 813 | 833 | 845 | 872 | 908 |
| [spdlog](http://github.com/gabime/spdlog) | 6500 | 6596 | 6724 | 6848 | 7560 | 9036 |
The benchmark was conducted on `Linux RHEL 9` with an `Intel Core i5-12600` at 4.8 GHz.
The CPUs are isolated on this system, and each thread was pinned to a different CPU. `GCC 13.1` was used as the compiler.
The benchmark methodology involved logging 20 messages in a loop, calculating and storing the average latency for those
20 messages, then waiting around ~2 milliseconds, and repeating this process for a specified number of iterations.
_In the `Quill Bounded Dropping` benchmarks, the dropping queue size is set to `262,144` bytes, which is double the
default size of `131,072` bytes._
You can find the benchmark code on the [logger_benchmarks](http://github.com/odygrd/logger_benchmarks) repository.
### Throughput
The maximum throughput is measured by determining the maximum number of log messages the backend logging thread can
write to the log file per second.
When measured on the same system as the latency benchmarks mentioned above, the average throughput of the backend
logging thread is `4.56 million msgs/sec`.
While the primary focus of the library is not on throughput, it does provide efficient handling of log messages across
multiple threads. The backend logging thread, responsible for formatting and ordering log messages from hot threads,
ensures that all queues are emptied on a high priority basis. The backend thread internally buffers the log messages
and then writes them later when the caller thread queues are empty or when a predefined limit,
`backend_thread_transit_events_soft_limit`, is reached. This approach prevents the need for allocating new queues
or dropping messages on the hot path.
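For context, the bundled throughput benchmark configures the backend thread through `quill::BackendOptions` before
calling `quill::Backend::start()`. The sketch below follows that pattern; only members that appear in the benchmark
sources are set, and the soft limit named above is referenced only in a comment because its exact member name is not
shown in this repository.
```c++
#include "quill/Backend.h"

int main()
{
  quill::BackendOptions backend_options;

  // Pin the backend thread to a dedicated core, as the bundled benchmarks do.
  backend_options.backend_cpu_affinity = 5;

  // The benchmarks also set transit_events_hard_limit; the related soft limit
  // (the "backend_thread_transit_events_soft_limit" described above) would be
  // configured here as well - check the BackendOptions header for the exact
  // member name before relying on it.

  quill::Backend::start(backend_options);
}
```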
Comparing throughput with other logging libraries in an asynchronous logging scenario has proven challenging. Some
libraries may drop log messages, resulting in smaller log files than expected, while others only offer asynchronous
flush, making it difficult to determine when the logging thread has finished processing all messages.
In contrast, Quill provides a blocking flush log guarantee, ensuring that every log message from the hot threads up to
that point is flushed to the file.
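A minimal sketch of that guarantee, reusing the `flush_log()` call that the bundled benchmarks rely on (the file name
here is arbitrary):
```c++
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"

#include <utility>

int main()
{
  quill::Backend::start();

  auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>("flush_example.log");
  quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(file_sink));

  LOG_INFO(logger, "last message before the flush");

  // Blocks the calling thread until every message queued so far has been
  // written out by the backend thread - the guarantee described above.
  logger->flush_log();
}
```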
For benchmarking purposes, you can find the
code [here](https://github.com/odygrd/quill/blob/master/benchmarks/backend_throughput/quill_backend_throughput.cpp).
### Compilation Time
Compile times are measured using `clang 15` for a `Release` build.
Below, you can find the additional headers that the library will include when you need to log, following
the [recommended_usage](https://github.com/odygrd/quill/blob/master/examples/recommended_usage/recommended_usage.cpp)
example.
![quill_v4_1_compiler_profile.speedscope.png](docs%2Fquill_v4_1_compiler_profile.speedscope.png)
There is also a compile-time benchmark measuring the compilation time of 2000 auto-generated log statements with
various arguments. You can find
it [here](https://github.com/odygrd/quill/blob/master/benchmarks/compile_time/compile_time_bench.cpp). It takes approximately 30
seconds to compile.
![quill_v4_compiler_bench.speedscope.png](docs%2Fquill_v4_compiler_bench.speedscope.png)
## Quick Start
```c++
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"
int main()
{
// Start the backend thread
quill::Backend::start();
// Log to file
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"example_file_logging.log");
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(file_sink));
// set the log level of the logger to trace_l3 (default is info)
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_INFO(logger, "Welcome to Quill!");
LOG_ERROR(logger, "An error message. error code {}", 123);
LOG_WARNING(logger, "A warning message.");
LOG_CRITICAL(logger, "A critical error.");
LOG_DEBUG(logger, "Debugging foo {}", 1234);
LOG_TRACE_L1(logger, "{:>30}", "right aligned");
LOG_TRACE_L2(logger, "Positional arguments are {1} {0} ", "too", "supported");
LOG_TRACE_L3(logger, "Support for floats {:03.2f}", 1.23456);
}
```
```c++
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
int main()
{
// Start the backend thread
quill::Backend::start();
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// Change the LogLevel to print everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_INFO(logger, "Welcome to Quill!");
LOG_ERROR(logger, "An error message. error code {}", 123);
LOG_WARNING(logger, "A warning message.");
LOG_CRITICAL(logger, "A critical error.");
LOG_DEBUG(logger, "Debugging foo {}", 1234);
LOG_TRACE_L1(logger, "{:>30}", "right aligned");
LOG_TRACE_L2(logger, "Positional arguments are {1} {0} ", "too", "supported");
LOG_TRACE_L3(logger, "Support for floats {:03.2f}", 1.23456);
}
```
### Output
[![Screenshot-2020-08-14-at-01-09-43.png](http://i.postimg.cc/02Vbt8LH/Screenshot-2020-08-14-at-01-09-43.png)](http://postimg.cc/LnZ95M4z)
## CMake-Integration
#### External
##### Building and Installing Quill
```
git clone http://github.com/odygrd/quill.git
mkdir cmake_build
cd cmake_build
cmake ..
make install
```
Note: To install in a custom directory, invoke cmake with `-DCMAKE_INSTALL_PREFIX=/quill/install-dir/`
Then, to use the library from a CMake project, you can locate it directly with `find_package()`.
##### Directory Structure
```
my_project/
├── CMakeLists.txt
├── main.cpp
```
##### CMakeLists.txt
```cmake
# Set only if needed - quill was installed under a custom non-standard directory
set(CMAKE_PREFIX_PATH /test_quill/usr/local/)
find_package(quill REQUIRED)
# Linking your project against quill
add_executable(example main.cpp)
target_link_libraries(example PUBLIC quill::quill)
```
#### Embedded
To embed the library directly, copy the source [folder](http://github.com/odygrd/quill/tree/master/quill/quill) to your
project and call `add_subdirectory()` in your `CMakeLists.txt` file.
##### Directory Structure
```
my_project/
├── quill/ (source folder)
├── CMakeLists.txt
├── main.cpp
```
##### CMakeLists.txt
```cmake
cmake_minimum_required(VERSION 3.1.0)
project(my_project)
set(CMAKE_CXX_STANDARD 17)
add_subdirectory(quill)
add_executable(my_project main.cpp)
target_link_libraries(my_project PUBLIC quill::quill)
```
#### Building Quill for Android NDK
To build Quill for the Android NDK, add the following `CMake` flags when configuring the build:
```
-DQUILL_NO_THREAD_NAME_SUPPORT:BOOL=ON
```
## Design
### Frontend (caller-thread)
When invoking a `LOG_` macro:
1. Creates a static constexpr metadata object to store `Metadata` such as the format string and source location.
2. Pushes the data to an SPSC lock-free queue. For each log message, the following variables are pushed:
| Variable | Description |
|------------|:--------------------------------------------------------------------------------------------------------------:|
| timestamp | Current timestamp |
| Metadata* | Pointer to metadata information |
| Logger* | Pointer to the logger instance |
| DecodeFunc | A pointer to a templated function containing all the log message argument types, used for decoding the message |
| Args... | A serialized binary copy of each log message argument that was passed to the `LOG_` macro |
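Conceptually, the per-message payload can be pictured as the record below. This is an illustrative sketch of the table
above, not the library's actual internal layout or type names.
```c++
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: what the frontend conceptually enqueues for each LOG_ call.
struct QueuedLogRecord
{
  uint64_t timestamp;                          // current timestamp
  const void* metadata;                        // pointer to the static constexpr Metadata
  const void* logger;                          // pointer to the Logger instance
  void (*decode_func)(const std::byte* args);  // templated decode function for the argument types
  std::vector<std::byte> serialized_args;      // serialized binary copy of each argument
};
```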
### Backend
The backend thread consumes each message from the SPSC queue, retrieves all the necessary information, and then formats
the message. Subsequently, it forwards the log message to all Sinks associated with the Logger.
![design.jpg](docs%2Fdesign.jpg)
## License
Quill is licensed under the [MIT License](http://opensource.org/licenses/MIT)
Quill depends on third party libraries with separate copyright notices and license terms.
Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses.
- ([MIT License](http://opensource.org/licenses/MIT)) {fmt} (http://github.com/fmtlib/fmt/blob/master/LICENSE.rst)
- ([MIT License](http://opensource.org/licenses/MIT)) doctest (http://github.com/onqtam/doctest/blob/master/LICENSE.txt)

View file

@ -0,0 +1,3 @@
add_subdirectory(hot_path_latency)
add_subdirectory(backend_throughput)
add_subdirectory(compile_time)

View file

@ -0,0 +1,7 @@
add_executable(BENCHMARK_quill_backend_throughput quill_backend_throughput.cpp)
set_common_compile_options(BENCHMARK_quill_backend_throughput)
target_link_libraries(BENCHMARK_quill_backend_throughput quill)
add_executable(BENCHMARK_quill_backend_throughput_no_buffering quill_backend_throughput_no_buffering.cpp)
set_common_compile_options(BENCHMARK_quill_backend_throughput_no_buffering)
target_link_libraries(BENCHMARK_quill_backend_throughput_no_buffering quill)

View file

@ -0,0 +1,65 @@
#include <chrono>
#include <iostream>
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/sinks/FileSink.h"
static constexpr size_t total_iterations = 4'000'000;
/**
* The backend worker just spins, so we just measure the total time elapsed for total_iterations
*/
int main()
{
// main thread affinity
quill::detail::set_cpu_affinity(0);
quill::BackendOptions backend_options;
backend_options.backend_cpu_affinity = 5;
// Start the logging backend thread and give it some time to init
quill::Backend::start(backend_options);
std::this_thread::sleep_for(std::chrono::milliseconds{100});
// Create a file sink to write to a file
std::shared_ptr<quill::Sink> file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"quill_backend_total_time.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
return cfg;
}(),
quill::FileEventNotifier{});
quill::Logger* logger = quill::Frontend::create_or_get_logger(
"bench_logger", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location) %(log_level) %(message)");
quill::Frontend::preallocate();
// start counting the time until backend worker finishes
auto const start_time = std::chrono::steady_clock::now();
for (size_t iteration = 0; iteration < total_iterations; ++iteration)
{
LOG_INFO(logger, "Iteration: {} int: {} double: {}", iteration, iteration * 2,
static_cast<double>(iteration) / 2);
}
// block until all messages are flushed
logger->flush_log();
auto const end_time = std::chrono::steady_clock::now();
auto const delta = end_time - start_time;
auto delta_d = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count();
std::cout << fmtquill::format(
"Throughput is {:.2f} million msgs/sec average, total time elapsed: {} ms for {} "
"log messages \n",
total_iterations / delta_d / 1e6,
std::chrono::duration_cast<std::chrono::milliseconds>(delta).count(), total_iterations)
<< std::endl;
}

View file

@ -0,0 +1,66 @@
#include <chrono>
#include <iostream>
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/sinks/FileSink.h"
static constexpr size_t total_iterations = 4'000'000;
/**
* The backend worker just spins, so we just measure the total time elapsed for total_iterations
*/
int main()
{
// main thread affinity
quill::detail::set_cpu_affinity(0);
quill::BackendOptions backend_options;
backend_options.backend_cpu_affinity = 5;
backend_options.transit_events_hard_limit = 1;
// Start the logging backend thread and give it some time to init
quill::Backend::start(backend_options);
std::this_thread::sleep_for(std::chrono::milliseconds{100});
// Create a file sink to write to a file
std::shared_ptr<quill::Sink> file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"quill_backend_total_time.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
return cfg;
}(),
quill::FileEventNotifier{});
quill::Logger* logger = quill::Frontend::create_or_get_logger(
"bench_logger", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location) %(log_level) %(message)");
quill::Frontend::preallocate();
// start counting the time until backend worker finishes
auto const start_time = std::chrono::steady_clock::now();
for (size_t iteration = 0; iteration < total_iterations; ++iteration)
{
LOG_INFO(logger, "Iteration: {} int: {} double: {}", iteration, iteration * 2,
static_cast<double>(iteration) / 2);
}
// block until all messages are flushed
logger->flush_log();
auto const end_time = std::chrono::steady_clock::now();
auto const delta = end_time - start_time;
auto delta_d = std::chrono::duration_cast<std::chrono::duration<double>>(delta).count();
std::cout << fmtquill::format(
"Throughput is {:.2f} million msgs/sec average, total time elapsed: {} ms for {} "
"log messages \n",
total_iterations / delta_d / 1e6,
std::chrono::duration_cast<std::chrono::milliseconds>(delta).count(), total_iterations)
<< std::endl;
}

View file

@ -0,0 +1,5 @@
add_subdirectory(qwrapper)
add_executable(BENCHMARK_quill_compile_time compile_time_bench.cpp)
set_common_compile_options(BENCHMARK_quill_compile_time)
target_link_libraries(BENCHMARK_quill_compile_time qwrapper_compile_time_bench)

File diff suppressed because it is too large

View file

@ -0,0 +1,57 @@
import random
def generate_log_statements(num_statements):
argument_types = [
'1', '2', '3.0', '4.0f', '5L', '6LL', '7UL', '8ULL', 'true', 'false',
'"example1"', '"example2"', '"example3"', 'std::string("str1")',
'std::string("str2")', 'std::string_view("view1")', 'std::string_view("view2")',
'static_cast<short>(9)', 'static_cast<unsigned short>(10)'
]
random_words = ["quick", "brown", "fox", "jumps", "over", "lazy", "dog", "logging", "test", "example"]
statements = []
for i in range(num_statements):
num_args = random.randint(1, 10) # Number of arguments for the log statement
args = random.sample(argument_types, num_args)
placeholders = ' '.join(["{}" for _ in args])
num_words = random.randint(3, 4) # Number of random words in the log message
words = ' '.join(random.sample(random_words, num_words))
statement = f' LOG_INFO(logger, "{words} {placeholders}", {", ".join(args)});'
statements.append(statement)
return statements
def write_to_file(filename, statements):
with open(filename, 'w') as f:
f.write('#include "quill/Backend.h"\n')
f.write('#include "quill/Frontend.h"\n')
f.write('#include "quill/LogMacros.h"\n')
f.write('#include "quill/Logger.h"\n')
f.write('#include "quill/sinks/ConsoleSink.h"\n')
f.write('#include <string>\n')
f.write('#include <utility>\n\n')
f.write('/**\n')
f.write(' * Trivial logging example to console\n')
f.write(' */\n\n')
f.write('int main()\n')
f.write('{\n')
f.write(' // Start the backend thread\n')
f.write(' quill::BackendOptions backend_options;\n')
f.write(' quill::Backend::start(backend_options);\n\n')
f.write(' // Frontend\n')
f.write(' auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");\n')
f.write(' quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));\n\n')
for statement in statements:
f.write(f'{statement}\n')
f.write('\n return 0;\n')
f.write('}\n')
if __name__ == '__main__':
num_statements = 2000
statements = generate_log_statements(num_statements)
write_to_file('log_benchmark.cpp', statements)

View file

@ -0,0 +1,16 @@
set(LIB_NAME qwrapper_compile_time_bench)
add_library(${LIB_NAME} STATIC
include/qwrapper/qwrapper.h
include/qwrapper/qwrapper.cpp)
# Add include directories for this library
target_include_directories(${LIB_NAME}
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR})
# Link quill dependency
target_link_libraries(${LIB_NAME} PUBLIC quill::quill)

View file

@ -0,0 +1,18 @@
#include "qwrapper.h"
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
void setup_quill(char const* log_file)
{
quill::Backend::start();
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("s1");
quill::Frontend::create_or_get_logger("root", std::move(console_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
}

View file

@ -0,0 +1,3 @@
#pragma once
void setup_quill(char const* log_file);

View file

@ -0,0 +1,7 @@
add_executable(BENCHMARK_quill_hot_path_rdtsc_clock hot_path_bench_config.h hot_path_bench.h quill_hot_path_rdtsc_clock.cpp)
set_common_compile_options(BENCHMARK_quill_hot_path_rdtsc_clock)
target_link_libraries(BENCHMARK_quill_hot_path_rdtsc_clock quill)
add_executable(BENCHMARK_quill_hot_path_system_clock hot_path_bench_config.h hot_path_bench.h quill_hot_path_system_clock.cpp)
set_common_compile_options(BENCHMARK_quill_hot_path_system_clock)
target_link_libraries(BENCHMARK_quill_hot_path_system_clock quill)

View file

@ -0,0 +1,204 @@
/**
* Copyright(c) 2020-present, Odysseas Georgoudis & quill contributors.
* Distributed under the MIT License (http://opensource.org/licenses/MIT)
*/
#pragma once
#include "hot_path_bench_config.h"
#include "quill/backend/BackendUtilities.h"
#include "quill/backend/RdtscClock.h"
#include "quill/core/Rdtsc.h"
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <random>
#include <thread>
#if defined(_WIN32)
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
inline uint16_t get_cpu_to_pin_thread(uint16_t thread_num)
{
auto const num_cores = static_cast<uint16_t>(std::thread::hardware_concurrency());
// If hardware_concurrency feature is not supported, zero value is returned.
if (num_cores == 0)
return 0;
return thread_num % num_cores;
}
// Instead of sleep
inline void wait(std::chrono::nanoseconds min, std::chrono::nanoseconds max)
{
#ifdef PERF_ENABLED
// when in perf use sleep as the other variables add noise
std::this_thread::sleep_for(max);
#else
static std::random_device rd;
static std::mt19937 gen(rd());
static std::uniform_int_distribution<> dis(static_cast<int>(min.count()), static_cast<int>(max.count()));
auto const start_time = std::chrono::steady_clock::now();
auto const end_time = start_time.time_since_epoch() + std::chrono::nanoseconds{dis(gen)};
std::chrono::nanoseconds time_now;
do
{
time_now = std::chrono::steady_clock::now().time_since_epoch();
} while (time_now < end_time);
#endif
}
#ifdef PERF_ENABLED
/***/
inline void run_log_benchmark(size_t num_iterations, size_t messages_per_iteration,
std::function<void()> on_thread_start,
std::function<void(uint64_t, uint64_t, double)> log_func,
std::function<void()> on_thread_exit, size_t current_thread_num)
{
// running thread affinity
quill::detail::set_cpu_affinity(get_cpu_to_pin_thread(current_thread_num));
on_thread_start();
unsigned int aux;
// Main Benchmark
for (size_t iteration = 0; iteration < num_iterations; ++iteration)
{
double const d = iteration + (0.1 * iteration);
auto const start = __rdtscp(&aux);
for (size_t i = 0; i < messages_per_iteration; ++i)
{
log_func(iteration, i, d);
}
auto const end = __rdtscp(&aux);
// send the next batch of messages after x time
wait(MIN_WAIT_DURATION, MAX_WAIT_DURATION);
}
on_thread_exit();
}
#else
/***/
inline void run_log_benchmark(size_t num_iterations, size_t messages_per_iteration,
std::function<void()> const& on_thread_start,
std::function<void(uint64_t, uint64_t, double)> const& log_func,
std::function<void()> const& on_thread_exit, uint16_t current_thread_num,
std::vector<uint64_t>& latencies, double rdtsc_ns_per_tick)
{
// running thread affinity
quill::detail::set_cpu_affinity(get_cpu_to_pin_thread(current_thread_num));
on_thread_start();
unsigned int aux;
// Main Benchmark
for (size_t iteration = 0; iteration < num_iterations; ++iteration)
{
double const d = static_cast<double>(iteration) + (0.1 * static_cast<double>(iteration));
auto const start = __rdtscp(&aux);
for (size_t i = 0; i < messages_per_iteration; ++i)
{
log_func(iteration, i, d);
}
auto const end = __rdtscp(&aux);
uint64_t const latency{static_cast<uint64_t>(
static_cast<double>((end - start)) / static_cast<double>(messages_per_iteration) * rdtsc_ns_per_tick)};
latencies.push_back(latency);
// send the next batch of messages after x time
wait(MIN_WAIT_DURATION, MAX_WAIT_DURATION);
}
on_thread_exit();
}
#endif
/***/
inline void run_benchmark(char const* benchmark_name, uint16_t thread_count, size_t num_iterations,
size_t messages_per_iteration, std::function<void()> const& on_thread_start,
std::function<void(uint64_t, uint64_t, double)> const& log_func,
std::function<void()> const& on_thread_exit)
{
// main thread affinity
quill::detail::set_cpu_affinity(0);
#ifndef PERF_ENABLED
std::cout << "running for " << thread_count << " thread(s)" << std::endl;
quill::detail::RdtscClock rdtsc_clock{std::chrono::minutes{30}};
// each thread gets a vector of latencies
std::vector<std::vector<uint64_t>> latencies;
latencies.resize(thread_count);
for (auto& elem : latencies)
{
elem.reserve(num_iterations);
}
#endif
std::vector<std::thread> threads;
threads.reserve(thread_count);
for (uint16_t thread_num = 0; thread_num < thread_count; ++thread_num)
{
#ifdef PERF_ENABLED
// Spawn num threads
threads.emplace_back(run_log_benchmark, num_iterations, (messages_per_iteration / thread_count),
on_thread_start, log_func, on_thread_exit, thread_num + 1);
#else
// Spawn num threads
threads.emplace_back(run_log_benchmark, num_iterations,
static_cast<size_t>(messages_per_iteration / thread_count),
std::ref(on_thread_start), std::ref(log_func), std::ref(on_thread_exit),
static_cast<uint16_t>(thread_num + 1u), std::ref(latencies[thread_num]),
rdtsc_clock.nanoseconds_per_tick());
#endif
}
// Wait for threads to finish
for (uint16_t i = 0; i < thread_count; ++i)
{
threads[i].join();
}
#ifndef PERF_ENABLED
// All threads have finished we can read all latencies
std::vector<uint64_t> latencies_combined;
latencies_combined.reserve(num_iterations * thread_count);
for (auto const& elem : latencies)
{
latencies_combined.insert(latencies_combined.end(), elem.begin(), elem.end());
}
// Sort all latencies
std::sort(latencies_combined.begin(), latencies_combined.end());
std::cout
<< "Thread Count " << thread_count << " - Total messages " << latencies_combined.size() * messages_per_iteration
<< " - " << benchmark_name << "\n | 50th | 75th | 90th | 95th | 99th | 99.9th | Worst |\n"
<< " | "
<< latencies_combined[static_cast<size_t>(static_cast<double>(num_iterations * thread_count) * 0.5)] << " | "
<< latencies_combined[static_cast<size_t>(static_cast<double>(num_iterations * thread_count) * 0.75)]
<< " | "
<< latencies_combined[static_cast<size_t>(static_cast<double>(num_iterations * thread_count) * 0.9)] << " | "
<< latencies_combined[static_cast<size_t>(static_cast<double>(num_iterations * thread_count) * 0.95)]
<< " | "
<< latencies_combined[static_cast<size_t>(static_cast<double>(num_iterations * thread_count) * 0.99)]
<< " | "
<< latencies_combined[static_cast<size_t>(static_cast<double>(num_iterations * thread_count) * 0.999)]
<< " | " << latencies_combined[static_cast<size_t>(latencies_combined.size() - 1)] << " |\n\n";
#endif
}

View file

@ -0,0 +1,39 @@
/**
* Copyright(c) 2020-present, Odysseas Georgoudis & quill contributors.
* Distributed under the MIT License (http://opensource.org/licenses/MIT)
*/
#pragma once
#include <chrono>
/**
* When running the benchmark using e.g. perf, enable this definition to remove extra noise
* from calculating and printing the results.
*
* To see shared cached lines :
* perf c2c record -g --call-graph dwarf,8192 ./benchmark_quill_call_site_latency
* perf c2c report -NN -g --call-graph -c pid,iaddr --stdio
*/
// #define PERF_ENABLED
#define THREAD_LIST_COUNT \
std::vector<uint16_t> { 1, 4 }
#define MESSAGES_PER_ITERATION \
std::size_t { 20 }
#define ITERATIONS \
std::size_t { 100000 }
/**
* Min-Max wait duration between each iteration - This lets the backend thread catch up
* a little bit with the caller thread, because the caller thread is so much faster.
* When the backend thread can't catch up it will cause the caller thread on the hot path
* to reallocate more space in the queue slowing it down.
* This benchmark is measuring latency not high throughput
* **/
#define MIN_WAIT_DURATION \
std::chrono::microseconds { 2000 }
#define MAX_WAIT_DURATION \
std::chrono::microseconds { 2200 }

View file

@ -0,0 +1,93 @@
/**
 * Adding a benchmark for another logger should be straightforward by duplicating and modifying
* this file.
*/
#include "hot_path_bench.h"
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/sinks/FileSink.h"
struct FrontendOptions
{
static constexpr quill::QueueType queue_type = quill::QueueType::UnboundedBlocking;
static constexpr uint32_t initial_queue_capacity = 131'072;
static constexpr uint32_t blocking_queue_retry_interval_ns = 800;
static constexpr bool huge_pages_enabled = false;
};
using Frontend = quill::FrontendImpl<FrontendOptions>;
using Logger = quill::LoggerImpl<FrontendOptions>;
/***/
void quill_benchmark(std::vector<uint16_t> const& thread_count_array,
size_t num_iterations_per_thread, size_t messages_per_iteration)
{
/** - MAIN THREAD START - Logger setup if any **/
/** - Setup Quill **/
// main thread affinity
quill::detail::set_cpu_affinity(0);
quill::BackendOptions backend_options;
backend_options.backend_cpu_affinity = 5;
// Start the logging backend thread and give it some time to init
quill::Backend::start(backend_options);
std::this_thread::sleep_for(std::chrono::milliseconds{100});
// wait for the backend thread to start
std::this_thread::sleep_for(std::chrono::seconds(1));
// Create a file sink to write to a file
std::shared_ptr<quill::Sink> file_sink = Frontend::create_or_get_sink<quill::FileSink>(
"quill_hot_path_rdtsc_clock.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
return cfg;
}(),
quill::FileEventNotifier{});
Logger* logger = Frontend::create_or_get_logger(
"bench_logger", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location) %(log_level) %(message)");
/** LOGGING THREAD FUNCTIONS - on_start, on_exit, log_func must be implemented **/
/** These run on one or more threads, based on the THREAD_LIST_COUNT config */
auto on_start = []() {
// on thread start
Frontend::preallocate();
};
auto on_exit = [logger]()
{
// on thread exit we block flush, so the next benchmark starts with the backend thread ready
// to process the messages
logger->flush_log();
};
// on main
auto log_func = [logger](uint64_t k, uint64_t i, double d) {
// Main logging function
// This will get called MESSAGES_PER_ITERATION * ITERATIONS for each caller thread.
// MESSAGES_PER_ITERATION will get averaged to a single number
LOG_INFO(logger, "Logging iteration: {}, message: {}, double: {}", k, i, d);
};
/** ALWAYS REQUIRED **/
// Run the benchmark for n threads
for (auto thread_count : thread_count_array)
{
run_benchmark("Logger: Quill - Benchmark: Hot Path Latency / Nanoseconds", thread_count,
num_iterations_per_thread, messages_per_iteration, on_start, log_func, on_exit);
}
}
/***/
int main(int, char**) { quill_benchmark(THREAD_LIST_COUNT, ITERATIONS, MESSAGES_PER_ITERATION); }

View file

@ -0,0 +1,96 @@
/**
 * Adding a benchmark for another logger should be straightforward by duplicating and modifying
* this file.
*/
#include "hot_path_bench.h"
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/sinks/FileSink.h"
struct FrontendOptions
{
static constexpr quill::QueueType queue_type = quill::QueueType::UnboundedBlocking;
static constexpr uint32_t initial_queue_capacity = 131'072;
static constexpr uint32_t blocking_queue_retry_interval_ns = 800;
static constexpr bool huge_pages_enabled = false;
};
using Frontend = quill::FrontendImpl<FrontendOptions>;
using Logger = quill::LoggerImpl<FrontendOptions>;
/***/
void quill_benchmark(std::vector<uint16_t> const& thread_count_array,
size_t num_iterations_per_thread, size_t messages_per_iteration)
{
/** - MAIN THREAD START - Logger setup if any **/
/** - Setup Quill **/
// main thread affinity
quill::detail::set_cpu_affinity(0);
quill::BackendOptions backend_options;
backend_options.backend_cpu_affinity = 5;
// Start the logging backend thread and give it some time to init
quill::Backend::start(backend_options);
std::this_thread::sleep_for(std::chrono::milliseconds{100});
// wait for the backend thread to start
std::this_thread::sleep_for(std::chrono::seconds(1));
// Create a file sink to write to a file
std::shared_ptr<quill::Sink> file_sink = Frontend::create_or_get_sink<quill::FileSink>(
"quill_hot_path_rdtsc_clock.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
return cfg;
}(),
quill::FileEventNotifier{});
Logger* logger = Frontend::create_or_get_logger(
"bench_logger", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location) %(log_level) %(message)", "%H:%M:%S.%Qns",
quill::Timezone::LocalTime, quill::ClockSourceType::System);
/** LOGGING THREAD FUNCTIONS - on_start, on_exit, log_func must be implemented **/
/** These run on one or more threads, based on the THREAD_LIST_COUNT config */
auto on_start = []()
{
// on thread start
Frontend::preallocate();
};
auto on_exit = [logger]()
{
// on thread exit we block flush, so the next benchmark starts with the backend thread ready
// to process the messages
logger->flush_log();
};
// on main
auto log_func = [logger](uint64_t k, uint64_t i, double d)
{
// Main logging function
// This will get called MESSAGES_PER_ITERATION * ITERATIONS for each caller thread.
// MESSAGES_PER_ITERATION will get averaged to a single number
LOG_INFO(logger, "Logging iteration: {}, message: {}, double: {}", k, i, d);
};
/** ALWAYS REQUIRED **/
// Run the benchmark for n threads
for (auto thread_count : thread_count_array)
{
run_benchmark("Logger: Quill - Benchmark: Hot Path Latency / Nanoseconds", thread_count,
num_iterations_per_thread, messages_per_iteration, on_start, log_func, on_exit);
}
}
/***/
int main(int, char**) { quill_benchmark(THREAD_LIST_COUNT, ITERATIONS, MESSAGES_PER_ITERATION); }

View file

@ -0,0 +1,11 @@
#Look for an executable called sphinx-build
find_program(SPHINX_EXECUTABLE
NAMES sphinx-build
DOC "Path to sphinx-build executable")
include(FindPackageHandleStandardArgs)
#Handle standard arguments to find_package like REQUIRED and QUIET
find_package_handle_standard_args(Sphinx
"Failed to find sphinx-build executable"
SPHINX_EXECUTABLE)

View file

@ -0,0 +1,21 @@
# Define the function to set common compile options
function(set_common_compile_options target_name)
cmake_parse_arguments(COMPILE_OPTIONS "" "VISIBILITY" "" ${ARGN})
# Set default visibility to PRIVATE if not provided
if (NOT DEFINED COMPILE_OPTIONS_VISIBILITY)
set(COMPILE_OPTIONS_VISIBILITY PRIVATE)
endif ()
target_compile_options(${target_name} ${COMPILE_OPTIONS_VISIBILITY}
$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-Wall -Wextra -Wconversion -pedantic -Wfatal-errors -Wno-unused-private-field -Wno-gnu-zero-variadic-macro-arguments -Wno-unused-parameter>
$<$<CXX_COMPILER_ID:MSVC>:/bigobj /WX /W4 /wd4324 /wd4189 /wd4996 /wd4100 /wd4127 /wd4702>)
# Additional MSVC specific options
if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
if (NOT QUILL_NO_EXCEPTIONS)
target_compile_options(${target_name} ${COMPILE_OPTIONS_VISIBILITY} /EHsc)
endif ()
endif ()
endfunction()

View file

@ -0,0 +1,189 @@
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.
#[=======================================================================[.rst:
doctest
-----
This module defines a function to help use the doctest test framework.
The :command:`doctest_discover_tests` discovers tests by asking the compiled test
executable to enumerate its tests. This does not require CMake to be re-run
when tests change. However, it may not work in a cross-compiling environment,
and setting test properties is less convenient.
This command is intended to replace use of :command:`add_test` to register
tests, and will create a separate CTest test for each doctest test case. Note
that this is in some cases less efficient, as common set-up and tear-down logic
cannot be shared by multiple test cases executing in the same instance.
However, it provides more fine-grained pass/fail information to CTest, which is
usually considered as more beneficial. By default, the CTest test name is the
same as the doctest name; see also ``TEST_PREFIX`` and ``TEST_SUFFIX``.
.. command:: doctest_discover_tests
Automatically add tests with CTest by querying the compiled test executable
for available tests::
doctest_discover_tests(target
[TEST_SPEC arg1...]
[EXTRA_ARGS arg1...]
[WORKING_DIRECTORY dir]
[TEST_PREFIX prefix]
[TEST_SUFFIX suffix]
[PROPERTIES name1 value1...]
[ADD_LABELS value]
[TEST_LIST var]
[JUNIT_OUTPUT_DIR dir]
)
``doctest_discover_tests`` sets up a post-build command on the test executable
that generates the list of tests by parsing the output from running the test
with the ``--list-test-cases`` argument. This ensures that the full
list of tests is obtained. Since test discovery occurs at build time, it is
not necessary to re-run CMake when the list of tests changes.
However, it requires that :prop_tgt:`CROSSCOMPILING_EMULATOR` is properly set
in order to function in a cross-compiling environment.
Additionally, setting properties on tests is somewhat less convenient, since
the tests are not available at CMake time. Additional test properties may be
assigned to the set of tests as a whole using the ``PROPERTIES`` option. If
more fine-grained test control is needed, custom content may be provided
through an external CTest script using the :prop_dir:`TEST_INCLUDE_FILES`
directory property. The set of discovered tests is made accessible to such a
script via the ``<target>_TESTS`` variable.
The options are:
``target``
Specifies the doctest executable, which must be a known CMake executable
target. CMake will substitute the location of the built executable when
running the test.
``TEST_SPEC arg1...``
Specifies test cases, wildcarded test cases, tags and tag expressions to
pass to the doctest executable with the ``--list-test-cases`` argument.
``EXTRA_ARGS arg1...``
Any extra arguments to pass on the command line to each test case.
``WORKING_DIRECTORY dir``
Specifies the directory in which to run the discovered test cases. If this
option is not provided, the current binary directory is used.
``TEST_PREFIX prefix``
Specifies a ``prefix`` to be prepended to the name of each discovered test
case. This can be useful when the same test executable is being used in
multiple calls to ``doctest_discover_tests()`` but with different
``TEST_SPEC`` or ``EXTRA_ARGS``.
``TEST_SUFFIX suffix``
Similar to ``TEST_PREFIX`` except the ``suffix`` is appended to the name of
every discovered test case. Both ``TEST_PREFIX`` and ``TEST_SUFFIX`` may
be specified.
``PROPERTIES name1 value1...``
Specifies additional properties to be set on all tests discovered by this
invocation of ``doctest_discover_tests``.
``ADD_LABELS value``
Specifies if the test labels should be set automatically.
``TEST_LIST var``
Make the list of tests available in the variable ``var``, rather than the
default ``<target>_TESTS``. This can be useful when the same test
executable is being used in multiple calls to ``doctest_discover_tests()``.
Note that this variable is only available in CTest.
``JUNIT_OUTPUT_DIR dir``
If specified, the parameter is passed along with ``--reporters=junit``
and ``--out=`` to the test executable. The actual file name is the same
as the test target, including prefix and suffix. This should be used
instead of EXTRA_ARGS to avoid race conditions writing the XML result
output when using parallel test execution.
#]=======================================================================]
#------------------------------------------------------------------------------
function(doctest_discover_tests TARGET)
cmake_parse_arguments(
""
""
"TEST_PREFIX;TEST_SUFFIX;WORKING_DIRECTORY;TEST_LIST;JUNIT_OUTPUT_DIR"
"TEST_SPEC;EXTRA_ARGS;PROPERTIES;ADD_LABELS"
${ARGN}
)
if(NOT _WORKING_DIRECTORY)
set(_WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
endif()
if(NOT _TEST_LIST)
set(_TEST_LIST ${TARGET}_TESTS)
endif()
## Generate a unique name based on the extra arguments
string(SHA1 args_hash "${_TEST_SPEC} ${_EXTRA_ARGS}")
string(SUBSTRING ${args_hash} 0 7 args_hash)
# Define rule to generate test list for aforementioned test executable
set(ctest_include_file "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_include-${args_hash}.cmake")
set(ctest_tests_file "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_tests-${args_hash}.cmake")
get_property(crosscompiling_emulator
TARGET ${TARGET}
PROPERTY CROSSCOMPILING_EMULATOR
)
add_custom_command(
TARGET ${TARGET} POST_BUILD
BYPRODUCTS "${ctest_tests_file}"
COMMAND "${CMAKE_COMMAND}"
-D "TEST_TARGET=${TARGET}"
-D "TEST_EXECUTABLE=$<TARGET_FILE:${TARGET}>"
-D "TEST_EXECUTOR=${crosscompiling_emulator}"
-D "TEST_WORKING_DIR=${_WORKING_DIRECTORY}"
-D "TEST_SPEC=${_TEST_SPEC}"
-D "TEST_EXTRA_ARGS=${_EXTRA_ARGS}"
-D "TEST_PROPERTIES=${_PROPERTIES}"
-D "TEST_ADD_LABELS=${_ADD_LABELS}"
-D "TEST_PREFIX=${_TEST_PREFIX}"
-D "TEST_SUFFIX=${_TEST_SUFFIX}"
-D "TEST_LIST=${_TEST_LIST}"
-D "TEST_JUNIT_OUTPUT_DIR=${_JUNIT_OUTPUT_DIR}"
-D "CTEST_FILE=${ctest_tests_file}"
-P "${_DOCTEST_DISCOVER_TESTS_SCRIPT}"
VERBATIM
)
file(WRITE "${ctest_include_file}"
"if(EXISTS \"${ctest_tests_file}\")\n"
" include(\"${ctest_tests_file}\")\n"
"else()\n"
" add_test(${TARGET}_NOT_BUILT-${args_hash} ${TARGET}_NOT_BUILT-${args_hash})\n"
"endif()\n"
)
if(NOT CMAKE_VERSION VERSION_LESS 3.10)
# Add discovered tests to directory TEST_INCLUDE_FILES
set_property(DIRECTORY
APPEND PROPERTY TEST_INCLUDE_FILES "${ctest_include_file}"
)
else()
# Add discovered tests as directory TEST_INCLUDE_FILE if possible
get_property(test_include_file_set DIRECTORY PROPERTY TEST_INCLUDE_FILE SET)
if(NOT ${test_include_file_set})
set_property(DIRECTORY
PROPERTY TEST_INCLUDE_FILE "${ctest_include_file}"
)
else()
message(FATAL_ERROR
"Cannot set more than one TEST_INCLUDE_FILE"
)
endif()
endif()
endfunction()
###############################################################################
set(_DOCTEST_DISCOVER_TESTS_SCRIPT
${CMAKE_CURRENT_LIST_DIR}/doctestAddTests.cmake
)

View file

@ -0,0 +1,120 @@
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.
set(prefix "${TEST_PREFIX}")
set(suffix "${TEST_SUFFIX}")
set(spec ${TEST_SPEC})
set(extra_args ${TEST_EXTRA_ARGS})
set(properties ${TEST_PROPERTIES})
set(add_labels ${TEST_ADD_LABELS})
set(junit_output_dir "${TEST_JUNIT_OUTPUT_DIR}")
set(script)
set(suite)
set(tests)
function(add_command NAME)
set(_args "")
foreach(_arg ${ARGN})
if(_arg MATCHES "[^-./:a-zA-Z0-9_]")
set(_args "${_args} [==[${_arg}]==]") # form a bracket_argument
else()
set(_args "${_args} ${_arg}")
endif()
endforeach()
set(script "${script}${NAME}(${_args})\n" PARENT_SCOPE)
endfunction()
# Run test executable to get list of available tests
if(NOT EXISTS "${TEST_EXECUTABLE}")
message(FATAL_ERROR
"Specified test executable '${TEST_EXECUTABLE}' does not exist"
)
endif()
if("${spec}" MATCHES .)
set(spec "--test-case=${spec}")
endif()
execute_process(
COMMAND ${TEST_EXECUTOR} "${TEST_EXECUTABLE}" ${spec} --list-test-cases
OUTPUT_VARIABLE output
RESULT_VARIABLE result
WORKING_DIRECTORY "${TEST_WORKING_DIR}"
)
if(NOT ${result} EQUAL 0)
message(FATAL_ERROR
"Error running test executable '${TEST_EXECUTABLE}':\n"
" Result: ${result}\n"
" Output: ${output}\n"
)
endif()
string(REPLACE "\n" ";" output "${output}")
# Parse output
foreach(line ${output})
if("${line}" STREQUAL "===============================================================================" OR "${line}" MATCHES [==[^\[doctest\] ]==])
continue()
endif()
set(test ${line})
set(labels "")
if(${add_labels})
# get test suite that test belongs to
execute_process(
COMMAND ${TEST_EXECUTOR} "${TEST_EXECUTABLE}" --test-case=${test} --list-test-suites
OUTPUT_VARIABLE labeloutput
RESULT_VARIABLE labelresult
WORKING_DIRECTORY "${TEST_WORKING_DIR}"
)
if(NOT ${labelresult} EQUAL 0)
message(FATAL_ERROR
"Error running test executable '${TEST_EXECUTABLE}':\n"
" Result: ${labelresult}\n"
" Output: ${labeloutput}\n"
)
endif()
string(REPLACE "\n" ";" labeloutput "${labeloutput}")
foreach(labelline ${labeloutput})
if("${labelline}" STREQUAL "===============================================================================" OR "${labelline}" MATCHES [==[^\[doctest\] ]==])
continue()
endif()
list(APPEND labels ${labelline})
endforeach()
endif()
if(NOT "${junit_output_dir}" STREQUAL "")
# turn testname into a valid filename by replacing all special characters with "-"
string(REGEX REPLACE "[/\\:\"|<>]" "-" test_filename "${test}")
set(TEST_JUNIT_OUTPUT_PARAM "--reporters=junit" "--out=${junit_output_dir}/${prefix}${test_filename}${suffix}.xml")
else()
unset(TEST_JUNIT_OUTPUT_PARAM)
endif()
# escape commas so that test cases with commas inside the name are handled properly
string(REPLACE "," "\\," test_name ${test})
# ...and add to script
add_command(add_test
"${prefix}${test}${suffix}"
${TEST_EXECUTOR}
"${TEST_EXECUTABLE}"
"--test-case=${test_name}"
"${TEST_JUNIT_OUTPUT_PARAM}"
${extra_args}
)
add_command(set_tests_properties
"${prefix}${test}${suffix}"
PROPERTIES
WORKING_DIRECTORY "${TEST_WORKING_DIR}"
${properties}
LABELS ${labels}
)
unset(labels)
list(APPEND tests "${prefix}${test}${suffix}")
endforeach()
# Create a list of all discovered tests, which users may use to e.g. set
# properties on the tests
add_command(set ${TEST_LIST} ${tests})
# Write CTest script
file(WRITE "${CTEST_FILE}" "${script}")

View file

@ -0,0 +1,55 @@
find_package(Doxygen REQUIRED)
# Find all the public headers
get_target_property(QUILL_PUBLIC_HEADER_DIR quill INTERFACE_INCLUDE_DIRECTORIES)
file(GLOB_RECURSE QUILL_PUBLIC_HEADERS ${QUILL_PUBLIC_HEADER_DIR}/*.h)
set(DOXYGEN_INPUT_DIR ${PROJECT_SOURCE_DIR}/quill)
set(DOXYGEN_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/doxygen)
set(DOXYGEN_INDEX_FILE ${DOXYGEN_OUTPUT_DIR}/xml/index.xml)
set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)
set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
#Replace variables inside @@ with the current values
configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY)
file(MAKE_DIRECTORY ${DOXYGEN_OUTPUT_DIR}) #Doxygen won't create this for us
add_custom_command(OUTPUT ${DOXYGEN_INDEX_FILE}
DEPENDS ${QUILL_PUBLIC_HEADERS}
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE_OUT}
MAIN_DEPENDENCY ${DOXYFILE_OUT} ${DOXYFILE_IN}
COMMENT "Generating docs")
add_custom_target(Doxygen ALL DEPENDS ${DOXYGEN_INDEX_FILE})
find_package(Sphinx REQUIRED)
set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR})
set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}/sphinx)
set(SPHINX_INDEX_FILE ${SPHINX_BUILD}/index.html)
# Only regenerate Sphinx when:
# - Doxygen has rerun
# - Our doc files have been updated
# - The Sphinx config has been updated
add_custom_command(OUTPUT ${SPHINX_INDEX_FILE}
COMMAND
${SPHINX_EXECUTABLE} -b html
# Tell Breathe where to find the Doxygen output
-Dbreathe_projects.Quill=${DOXYGEN_OUTPUT_DIR}/xml
${SPHINX_SOURCE} ${SPHINX_BUILD}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS
# Other docs files you want to track should go here (or in some variable)
${CMAKE_CURRENT_SOURCE_DIR}/index.rst
${CMAKE_CURRENT_SOURCE_DIR}/users-api.rst
${CMAKE_CURRENT_SOURCE_DIR}/install.rst
${CMAKE_CURRENT_SOURCE_DIR}/usage.rst
${CMAKE_CURRENT_SOURCE_DIR}/tutorial.rst
${CMAKE_CURRENT_SOURCE_DIR}/features.rst
${DOXYGEN_INDEX_FILE}
MAIN_DEPENDENCY ${SPHINX_SOURCE}/conf.py
COMMENT "Generating documentation with Sphinx")
# Nice named target so we can run the job easily
add_custom_target(Sphinx ALL DEPENDS ${SPHINX_INDEX_FILE})

File diff suppressed because it is too large

View file

@ -0,0 +1,53 @@
import subprocess, os
def configureDoxyfile(input_dir, output_dir):
with open('Doxyfile.in', 'r') as file:
filedata = file.read()
filedata = filedata.replace('@DOXYGEN_INPUT_DIR@', input_dir)
filedata = filedata.replace('@DOXYGEN_OUTPUT_DIR@', output_dir)
with open('Doxyfile', 'w') as file:
file.write(filedata)
# Check if we're running on Read the Docs' servers
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
breathe_projects = {}
if read_the_docs_build:
input_dir = '../quill'
output_dir = 'build'
configureDoxyfile(input_dir, output_dir)
subprocess.call('doxygen', shell=True)
breathe_projects['Quill'] = output_dir + '/xml'
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'Quill'
copyright = '2024, Odysseas Georgoudis'
author = 'Odysseas Georgoudis'
release = 'v4.0.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = ["breathe", "sphinx.ext.autosectionlabel"]
breathe_default_project = "Quill"
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']

View file

@ -0,0 +1,139 @@
<mxfile host="app.diagrams.net" modified="2024-05-16T00:07:38.477Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36 Edg/124.0.0.0" etag="VW8ihnyupRLc_PgVfbaT" version="22.1.21" type="device">
<diagram name="Page-1" id="kLzpoFuRwS6fnHKDuZMH">
<mxGraphModel dx="2066" dy="1145" grid="1" gridSize="12" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1000" pageHeight="1200" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="BmINEKKr7EmUC2mtGokD-1" value="hot thread" style="ellipse;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="258" y="66" width="100" height="70" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-2" value="hot thread" style="ellipse;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="653" y="66" width="100" height="70" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-5" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="498" y="91" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-6" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="576.5" y="91" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-7" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="418" y="91" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-10" value="thread local context&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.basic.rect;fillColor2=none;strokeWidth=1;size=26.67;indent=5;labelPosition=center;align=center;" parent="1" vertex="1">
<mxGeometry x="108" y="246" width="280" height="190" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-14" value="SPSC queue&lt;br&gt;(bounded or unbounded)" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.donut;dx=25;fillColor=#fff2cc;strokeColor=#000000;container=0;perimeterSpacing=1;labelPosition=center;align=center;whiteSpace=wrap;" parent="1" vertex="1">
<mxGeometry x="268" y="291" width="90" height="90" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-15" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;dashed=1;dashPattern=12 12;labelPosition=left;verticalLabelPosition=top;align=right;verticalAlign=bottom;entryX=0.46;entryY=0.026;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-1" target="BmINEKKr7EmUC2mtGokD-14" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="458" y="306" as="sourcePoint" />
<mxPoint x="478" y="266" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-16" value="TransitEvent buffer (unbounded)" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.donut;dx=25;fillColor=#e1d5e7;strokeColor=#000000;container=0;shadow=0;perimeterSpacing=1;whiteSpace=wrap;" parent="1" vertex="1">
<mxGeometry x="128" y="291" width="90" height="90" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-19" value="1. A hot thread pushes a header and a copy of (args...) to a queue" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
<mxGeometry x="403" y="176" width="210" height="30" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-20" value="Backend logging thread&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;" style="ellipse;whiteSpace=wrap;html=1;shadow=0;sketch=0;strokeColor=#000000;" parent="1" vertex="1">
<mxGeometry x="268" y="694" width="520" height="440" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-24" value="thread local context&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;&lt;br&gt;" style="verticalLabelPosition=middle;verticalAlign=middle;html=1;shape=mxgraph.basic.rect;fillColor2=none;strokeWidth=1;size=26.67;indent=5;labelPosition=center;align=center;" parent="1" vertex="1">
<mxGeometry x="628" y="246" width="280" height="190" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-25" value="SPSC queue&lt;br&gt;(bounded or unbounded)" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.donut;dx=25;fillColor=#fff2cc;strokeColor=#000000;container=0;perimeterSpacing=1;labelPosition=center;align=center;whiteSpace=wrap;" parent="1" vertex="1">
<mxGeometry x="658" y="296" width="90" height="90" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-26" value="TransitEvent buffer (unbounded)" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.donut;dx=25;fillColor=#e1d5e7;strokeColor=#000000;container=0;shadow=0;perimeterSpacing=1;whiteSpace=wrap;" parent="1" vertex="1">
<mxGeometry x="788" y="296" width="90" height="90" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-27" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;dashed=1;dashPattern=12 12;labelPosition=left;verticalLabelPosition=top;align=right;verticalAlign=bottom;entryX=0.511;entryY=0.001;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-2" target="BmINEKKr7EmUC2mtGokD-25" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="388" y="171" as="sourcePoint" />
<mxPoint x="340.0857142857144" y="300" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-29" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="498" y="316" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-31" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="576.5" y="316" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-32" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="418" y="316" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-34" value="2. The backend thread pops all messages from all queues, decodes them and stores&amp;nbsp;&lt;br&gt;them in the TransitEvent buffer" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
<mxGeometry x="418" y="456" width="194" height="84" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-61" value="" style="rounded=0;whiteSpace=wrap;html=1;shadow=0;sketch=0;strokeColor=#000000;" parent="1" vertex="1">
<mxGeometry x="473" y="800" width="110" height="160" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-43" value="3.TransitEvents are processed after all SPSC queues are empty or when a max limit is reached. The TransitEvent with the min timestamp is passed to the all the registered Sinks." style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
<mxGeometry x="108" y="648" width="240" height="90" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-46" value="" style="rounded=1;whiteSpace=wrap;html=1;shadow=0;sketch=0;strokeColor=#000000;" parent="1" vertex="1">
<mxGeometry x="508" y="910" width="40" height="40" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-45" value="" style="rounded=1;whiteSpace=wrap;html=1;shadow=0;sketch=0;strokeColor=#000000;" parent="1" vertex="1">
<mxGeometry x="508" y="826" width="40" height="40" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-48" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="518" y="870" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-49" value="" style="shape=waypoint;sketch=0;fillStyle=solid;size=6;pointerEvents=1;points=[];fillColor=none;resizable=0;rotatable=0;perimeter=centerPerimeter;snapToPoint=1;" parent="1" vertex="1">
<mxGeometry x="518" y="890" width="20" height="20" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-52" value="Sinks" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" parent="1" vertex="1">
<mxGeometry x="498" y="800" width="60" height="30" as="geometry" />
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-53" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;exitX=0.822;exitY=0.87;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0.419;entryY=0.008;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-14" target="BmINEKKr7EmUC2mtGokD-20" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="458" y="516" as="sourcePoint" />
<mxPoint x="538" y="666" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-54" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;exitX=0.156;exitY=0.862;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0.571;entryY=0;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-25" target="BmINEKKr7EmUC2mtGokD-20" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="321.98799999999983" y="392" as="sourcePoint" />
<mxPoint x="598" y="666" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-56" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;entryX=0.909;entryY=0.826;entryDx=0;entryDy=0;entryPerimeter=0;exitX=0.356;exitY=0.006;exitDx=0;exitDy=0;exitPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-20" target="BmINEKKr7EmUC2mtGokD-16" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="518" y="666" as="sourcePoint" />
<mxPoint x="508" y="466" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-57" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;entryX=0.17;entryY=0.928;entryDx=0;entryDy=0;entryPerimeter=0;exitX=0.637;exitY=0.003;exitDx=0;exitDy=0;exitPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-20" target="BmINEKKr7EmUC2mtGokD-26" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="568" y="656" as="sourcePoint" />
<mxPoint x="220.62800000000016" y="375.99199999999996" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-58" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;exitX=0.442;exitY=1.051;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0.25;entryY=0;entryDx=0;entryDy=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-16" target="BmINEKKr7EmUC2mtGokD-61" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="352.6239999999998" y="380.03999999999996" as="sourcePoint" />
<mxPoint x="488" y="826" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-59" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;exitX=0.812;exitY=0.899;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0.827;entryY=0.007;entryDx=0;entryDy=0;entryPerimeter=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-26" target="BmINEKKr7EmUC2mtGokD-61" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="177.66400000000021" y="396.692" as="sourcePoint" />
<mxPoint x="508" y="826" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-62" value="" style="endArrow=classic;html=1;rounded=0;dashed=1;dashPattern=12 12;strokeColor=default;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" parent="1" source="BmINEKKr7EmUC2mtGokD-61" target="BmINEKKr7EmUC2mtGokD-65" edge="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="458" y="766" as="sourcePoint" />
<mxPoint x="502" y="1036" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="BmINEKKr7EmUC2mtGokD-65" value="4. Sink output&amp;nbsp;" style="rounded=1;whiteSpace=wrap;html=1;shadow=0;sketch=0;strokeColor=#000000;" parent="1" vertex="1">
<mxGeometry x="432.5" y="1006" width="191" height="90" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

Binary file not shown.

View file

@ -0,0 +1,86 @@
.. _features:
##############################################################################
Features
##############################################################################
Thread Safety
=============
All components and APIs offered to the user are intended to be thread-safe without any special work needed.
Instances of :cpp:class:`quill::LoggerImpl` are thread-safe by default. The same instance can be used to log from any thread.
Any thread can safely modify the active log level of the logger.
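For example, sharing one logger across threads requires no extra synchronisation. A minimal sketch (``logger`` is assumed to have been created as shown in the tutorial examples):
.. code:: cpp
#include <thread>
// Both threads log through the same Logger* instance
std::thread worker([logger] { LOG_INFO(logger, "hello from a worker thread"); });
LOG_INFO(logger, "hello from the main thread");
worker.join();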
Logging types
=====================================================
For primitive types, std::string, and std::string_view, the library will perform a deep copy, and all formatting will occur asynchronously in the backend thread.
However, standard library types and user-defined types need to be converted to a string before being passed to the logger.
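A minimal sketch of that string-first approach (this assumes the {fmt} library is available for the conversion; any to-string helper works just as well):
.. code:: cpp
#include <fmt/ranges.h>
#include <vector>
std::vector<int> sizes {1, 2, 3};
// Format on the calling thread, then hand the finished string to the logger
LOG_INFO(logger, "sizes: {}", fmt::format("{}", sizes));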
Guaranteed logging
=======================
Quill employs a thread-local single-producer-single-consumer queue to relay logs to the backend thread,
ensuring that log messages are never dropped.
Initially, an unbounded queue with a small size is used for performance optimization.
However, if the queue reaches full capacity, a new queue will be allocated, incurring a slight performance penalty for the frontend.
The default unbounded queue can expand up to a size of 2GB. Should this limit be reached, the frontend thread will block.
However, it's possible to alter the queue type within the FrontendOptions.
Customising the queue size and type
--------------------------------------
The queue size and type are configurable at compile time by providing a custom FrontendOptions class.
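For example, switching to a small bounded dropping queue looks roughly like the sketch below (it mirrors the ``custom_frontend_options`` example that ships with the library, so the member names come from there):
.. code:: cpp
struct CustomFrontendOptions
{
  static constexpr quill::QueueType queue_type = quill::QueueType::BoundedDropping;
  static constexpr uint32_t initial_queue_capacity = 256;
  static constexpr uint32_t blocking_queue_retry_interval_ns = 800;
  static constexpr bool huge_pages_enabled = false;
};
// Frontend and Logger types that use the custom options
using CustomFrontend = quill::FrontendImpl<CustomFrontendOptions>;
using CustomLogger = quill::LoggerImpl<CustomFrontendOptions>;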
Flush
===============================
You can explicitly instruct the frontend thread to wait until the log is flushed.
.. note:: The thread that calls :cpp:func:`flush_log` will **block** until every message up to that point is flushed.
.. doxygenfunction:: flush_log
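A minimal usage sketch (assuming ``flush_log()`` is called through the logger object, as in the API reference above):
.. code:: cpp
LOG_INFO(logger, "about to do something critical");
// Blocks the calling thread until every message enqueued so far has been written out
logger->flush_log();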
Application Crash Policy
========================
When the program is terminated gracefully, quill will go through its destructor where all messages are guaranteed to be logged.
However, if the applications crashes, log messages can be lost.
To avoid losing messages when the application crashes due to a signal interrupt, the user must set up their own signal
handler and call :cpp:func:`flush_log` inside the signal handler.
There is a built-in signal handler that offers this crash-safe behaviour and can be enabled in :cpp:func:`start_with_signal_handler<quill::FrontendOptions>`
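A sketch of enabling the built-in handler when the backend is started (the template argument matches the function referenced above; treat the exact argument list as an assumption):
.. code:: cpp
quill::BackendOptions backend_options;
// Starts the backend thread and installs the built-in crash-safe signal handler
quill::Backend::start_with_signal_handler<quill::FrontendOptions>(backend_options);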
Log Messages Timestamp Order
==============================
Quill creates a single worker backend thread which orders the messages in all queues by timestamp before printing them to the log file.
Number of Backend Threads
============================
Quill's focus is on low latency, not high throughput. Therefore, there is a single backend thread that processes all logs.
Latency of the first log message
====================================
A queue and an internal buffer will be allocated on the first log message of each thread. If the latency of the first
log message is important, it is recommended to call :cpp:func:`quill::preallocate`
.. doxygenfunction:: preallocate()
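A sketch of warming up a latency-critical thread before its first log statement (whether ``preallocate()`` is exposed on the frontend class or directly in the ``quill`` namespace depends on the library version, so the qualification below is an assumption):
.. code:: cpp
// Call once per hot thread, before the first LOG_ macro on that thread,
// so the SPSC queue and internal buffer are allocated ahead of time
quill::Frontend::preallocate();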
Configuration
======================
Quill offers a few customization options, which are also well-documented.
This customization can be applied to either the frontend or the backend.
Frontend configuration occurs at compile time, thus requiring a custom FrontendOptions class to be provided
:cpp:class:`quill::FrontendOptions`
For customizing the backend, refer to :cpp:class:`quill::BackendOptions`
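Backend options, by contrast, are plain runtime values passed to ``Backend::start``; a short sketch (the sleep duration is an illustrative value):
.. code:: cpp
quill::BackendOptions backend_options;
// Let the backend thread sleep a little longer between polls
backend_options.sleep_duration = std::chrono::milliseconds{500};
quill::Backend::start(backend_options);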

View file

@ -0,0 +1,26 @@
.. _index:
##############################################################################
Quill - Asynchronous Low Latency Logging Library for C++
##############################################################################
Quill is an open source, cross platform C++17 logging library designed for latency sensitive applications.
`Github <http://github.com/odygrd/quill>`_
.. toctree::
:hidden:
:maxdepth: 2
:caption: General Information
install
tutorial
features
usage
.. toctree::
:hidden:
:maxdepth: 2
:caption: API
users-api

View file

@ -0,0 +1,82 @@
.. _install:
##############################################################################
Install
##############################################################################
Package Managers
=================
====================== ======================= ===================
Homebrew vcpkg Conan
====================== ======================= ===================
``brew install quill`` ``vcpkg install quill`` ``quill/[>=1.2.3]``
====================== ======================= ===================
CMake-Integration
=================
External
--------
Building and Installing Quill as Static Library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: bash
git clone https://github.com/odygrd/quill.git
mkdir cmake_build
cd cmake_build
cmake ..
make install
Note: To install in a custom directory invoke cmake with ``-DCMAKE_INSTALL_PREFIX=/quill/install-dir/``
Then, to use the library from a CMake project, you can locate it directly with ``find_package()``
Directory Structure
~~~~~~~~~~~~~~~~~~~
::
my_project/
├── CMakeLists.txt
├── main.cpp
CMakeLists.txt
~~~~~~~~~~~~~~
.. code:: cmake
# Set only if needed - quill was installed under a custom non-standard directory
set(CMAKE_PREFIX_PATH /test_quill/usr/local/)
find_package(quill REQUIRED)
# Linking your project against quill
add_executable(example main.cpp)
target_link_libraries(example PRIVATE quill::quill)
Embedded
--------
To embed the library directly, copy the source to your project and call ``add_subdirectory()`` in your ``CMakeLists.txt`` file
Directory Structure
~~~~~~~~~~~~~~~~~~~
::
my_project/
├── quill/ (source folder)
├── CMakeLists.txt
├── main.cpp
CMakeLists.txt
~~~~~~~~~~~~~~
.. code:: cmake
add_subdirectory(quill)
add_executable(my_project main.cpp)
target_link_libraries(my_project PRIVATE quill::quill)

View file

@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.https://www.sphinx-doc.org/
exit /b 1
)
if "%1" == "" goto help
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,2 @@
breathe
sphinx_rtd_theme

View file

@ -0,0 +1,525 @@
.. _tutorial:
##############################################################################
Tutorial
##############################################################################
Basic Example
=============
.. code:: cpp
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
int main()
{
// Start the backend thread
quill::Backend::start();
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
LOG_INFO(logger, "This is a log info example {}", 123);
}
In the above example a logger to ``stdout`` is created with its name set to “root”.
Each :cpp:class:`quill::LoggerImpl` contains a :cpp:class:`quill::PatternFormatter` object which is responsible for the
formatting of the message.
Moreover, each :cpp:class:`quill::LoggerImpl` contains single or multiple :cpp:class:`quill::Sink` objects. The sink
objects actually deliver the log message to their output source.
A single backend thread checks for new log messages periodically.
Starting the backend thread is the responsibility of the user. The backend thread will automatically stop at the end
of `main` printing every message, as long as the application is terminated gracefully.
Use of macros is unavoidable in order to achieve better runtime performance. The static information of a log
(such as format string, log level, location) is created at compile time. It is passed along with the type of each
argument to a decoding function. A template instantiation per log statement is created.
Logging Macros
================
The following macros are provided for logging:
.. c:macro:: LOG_TRACE_L3(logger, log_message_format, args)
.. c:macro:: LOG_TRACE_L2(logger, log_message_format, args)
.. c:macro:: LOG_TRACE_L1(logger, log_message_format, args)
.. c:macro:: LOG_DEBUG(logger, log_message_format, args)
.. c:macro:: LOG_INFO(logger, log_message_format, args)
.. c:macro:: LOG_WARNING(logger, log_message_format, args)
.. c:macro:: LOG_ERROR(logger, log_message_format, args)
.. c:macro:: LOG_CRITICAL(logger, log_message_format, args)
.. c:macro:: LOG_BACKTRACE(logger, log_message_format, args)
Sinks
========
Sinks are the objects responsible for writing logs to their respective targets.
A :cpp:class:`quill::Sink` object serves as the base class for various sink-derived classes.
Each sink handles outputting logs to a single target, such as a file, console, or database.
Upon creation, a sink object is registered and owned by a central manager object, the quill::detail::SinkManager.
For files, one sink is created per filename, and the file is opened once. If a sink is requested that refers to an already opened file, the existing Sink object is returned. Users can create multiple stdout or stderr handles by providing a unique ID per handle.
When creating a logger, one or more sinks for that logger can be specified. Sinks can only be registered during the logger creation.
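For instance, two console sinks can coexist under different ids, while asking twice for the same file name yields the same sink object (a sketch; the names ``console_a``, ``console_b`` and ``app.log`` are illustrative):
.. code:: cpp
auto console_a = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("console_a");
auto console_b = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("console_b");
// Requesting the same file name again returns the sink that already owns the open file
auto file_first  = quill::Frontend::create_or_get_sink<quill::FileSink>("app.log");
auto file_second = quill::Frontend::create_or_get_sink<quill::FileSink>("app.log");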
Sharing sinks between loggers
==================================
It is possible to share the same sink object between multiple logger objects,
for example when all logger objects are writing to the same file. The following code is also thread-safe.
.. code:: cpp
auto file_sink = Frontend::create_or_get_sink<FileSink>(
filename,
[]()
{
FileSinkConfig cfg;
cfg.set_open_mode('w');
return cfg;
}(),
FileEventNotifier{});
quill::Logger* logger_a = Frontend::create_or_get_logger("logger_a", file_sink);
quill::Logger* logger_b = Frontend::create_or_get_logger("logger_b", file_sink);
Sink Types
==================================
ConsoleSink
--------------
The ``ConsoleSink`` class sends logging output to streams ``stdout`` or ``stderr``.
Printing colour codes to a terminal or the Windows console is also supported.
FileSink
-----------
Logging to file
~~~~~~~~~~~~~~~~~~~~~
.. code:: cpp
int main()
{
// Start the backend thread
quill::Backend::start();
// Frontend
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"trivial_logging.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(file_sink));
LOG_INFO(logger, "log something {}", 123);
}
RotatingFileSink
-------------------
Rotating log by size or time
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: cpp
// Start the backend thread
quill::Backend::start();
// Frontend
auto rotating_file_sink = quill::Frontend::create_or_get_sink<quill::RotatingFileSink>(
"rotating_file.log",
[]()
{
// See RotatingFileSinkConfig for more options
quill::RotatingFileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
cfg.set_rotation_time_daily("18:30");
cfg.set_rotation_max_file_size(1024);
return cfg;
}());
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(rotating_file_sink));
for (int i = 0; i < 20; ++i)
{
LOG_INFO(logger, "Hello from rotating logger, index is {}", i);
}
JsonFileSink
-----------------------
Json log
~~~~~~~~~~~~~~~~~~~~~
.. code:: cpp
// Start the backend thread
quill::Backend::start();
// Frontend
// Create a json file for output
auto json_sink = quill::Frontend::create_or_get_sink<quill::JsonFileSink>(
"json_sink_logging.log",
[]()
{
quill::JsonFileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
// When using the JsonFileSink, it is ideal to set the logging pattern to empty to avoid unnecessary message formatting.
quill::Logger* json_logger = quill::Frontend::create_or_get_logger(
"json_logger", std::move(json_sink), "", "%H:%M:%S.%Qns", quill::Timezone::GmtTime);
for (int i = 0; i < 2; ++i)
{
LOG_INFO(json_logger, "{method} to {endpoint} took {elapsed} ms", "POST", "http://", 10 * i);
}
// It is also possible to create a logger that logs to both the json file and stdout
// with the appropriate format
auto json_sink_2 = quill::Frontend::get_sink("json_sink_logging.log");
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("console_sink_id_1");
// We set a custom format pattern here to also include the named_args
quill::Logger* hybrid_logger = quill::Frontend::create_or_get_logger(
"hybrid_logger", {std::move(json_sink_2), std::move(console_sink)},
"%(time) [%(thread_id)] %(short_source_location:<28) LOG_%(log_level:<9) %(logger:<20) "
"%(message) [%(named_args)]");
for (int i = 2; i < 4; ++i)
{
LOG_INFO(hybrid_logger, "{method} to {endpoint} took {elapsed} ms", "POST", "http://", 10 * i);
}
Filters
==================================
A Filter is a class that can be used for filtering log records in the backend worker thread.
This is a simple way to ensure that a logger or sink will only output desired log messages.
One or several :cpp:class:`quill::Filter` can be added to a :cpp:class:`quill::Sink` instance using the
:cpp:func:`void add_filter(std::unique_ptr<Filter> filter)`
The sink stores all added filters in a vector. The final log message is logged if all filters of the sink return `true`.
Filtering per sink
-----------------------
The below example logs all WARNING and higher log level messages to console and all INFO and lower level messages to a file.
.. code:: cpp
// Filter class for our file sink
class FileFilter : public quill::Filter
{
public:
FileFilter() : quill::Filter("FileFilter"){};
QUILL_NODISCARD bool filter(quill::MacroMetadata const* log_metadata, uint64_t log_timestamp, std::string_view thread_id,
std::string_view thread_name, std::string_view logger_name,
quill::LogLevel log_level, std::string_view log_message) noexcept override
{
if (log_metadata->log_level() < quill::LogLevel::Warning)
{
return true;
}
return false;
}
};
// Filter for the stdout sink
class StdoutFilter : public quill::Filter
{
public:
StdoutFilter() : quill::Filter("StdoutFilter"){};
QUILL_NODISCARD bool filter(quill::MacroMetadata const* log_metadata, uint64_t log_timestamp, std::string_view thread_id,
std::string_view thread_name, std::string_view logger_name,
quill::LogLevel log_level, std::string_view log_message) noexcept override
{
if (log_metadata->log_level() >= quill::LogLevel::Warning)
{
return true;
}
return false;
}
};
int main()
{
// Start the logging backend thread
quill::Backend::start();
// Get a sink to the file
// The first time this function is called a file sink is created for this filename.
// Calling the function with the same filename will return the existing sink
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"example_filters.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
// Create and add the filter to our sink
file_sink->add_filter(std::make_unique<FileFilter>());
// Also create an stdout sink
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
// Create and add the filter to our sink
console_sink->add_filter(std::make_unique<StdoutFilter>());
// Create a logger using this sink
quill::Logger* logger = quill::Frontend::create_or_get_logger("logger", {std::move(file_sink), std::move(console_sink)});
LOG_INFO(logger, "test");
LOG_ERROR(logger, "test");
}
Formatters
==================================
The :cpp:class:`quill::PatternFormatter` specifies the layout of log records in the final output.
Each :cpp:class:`quill::LoggerImpl` object owns a PatternFormatter object.
This means that each Logger can be customised to output in a different format.
Customising the format output can only be done during the creation of the logger.
If no custom format is set, each newly created Sink uses the same formatting as the default logger.
The format output can be customised by providing a string of certain
attributes.
+-------------------------+--------------------------+----------------------------------------+
| Name | Format | Description |
+=========================+==========================+========================================+
| time | %(time) | Human-readable time when the LogRecord |
| | | was created. By default this is of the |
| | | form '2003-07-08 16:49:45.896' (the |
| | | numbers after the period are the |
| | | millisecond portion of the time). |
+-------------------------+--------------------------+----------------------------------------+
| file_name | %(file_name) | Filename portion of pathname. |
+-------------------------+--------------------------+----------------------------------------+
| full_path | %(full_path) | Full path of the source file where the |
| | | logging call was issued. |
+-------------------------+--------------------------+----------------------------------------+
| caller_function | %(caller_function) | Name of function containing the |
| | | logging call. |
+-------------------------+--------------------------+----------------------------------------+
| log_level | %(log_level) | Text logging level for the message |
| | | (TRACEL3, TRACEL2, TRACEL1, |
| | | DEBUG, INFO, WARNING, ERROR, |
| | | CRITICAL, BACKTRACE). |
+-------------------------+--------------------------+----------------------------------------+
| log_level_id | %(log_level_id) | Abbreviated level name (T3, T2, |
| | | T1, D, I, W, E, C, BT). |
+-------------------------+--------------------------+----------------------------------------+
| line_number | %(line_number) | Source line number where the logging |
| | | call was issued (if available). |
+-------------------------+--------------------------+----------------------------------------+
| logger | %(logger) | Name of the logger used to log the |
| | | call. |
+-------------------------+--------------------------+----------------------------------------+
| message | %(message) | The logged message, computed as msg % |
| | | args. This is set when Formatter. |
| | | format() is invoked. |
+-------------------------+--------------------------+----------------------------------------+
| thread_id | %(thread_id) | Thread ID (if available). |
+-------------------------+--------------------------+----------------------------------------+
| thread_name | %(thread_name) | Thread name if set. The name of the |
| | | thread must be set prior to issuing |
| | | any log statement on that thread. |
+-------------------------+--------------------------+----------------------------------------+
| process_id | %(process_id) | Process ID |
+-------------------------+--------------------------+----------------------------------------+
| source_location | %(source_location) | Full source file path and line number |
| | | as a single string |
+-------------------------+--------------------------+----------------------------------------+
| short_source_location | %(short_source_location) | Full source file path and line |
| | | number as a single string |
+-------------------------+--------------------------+----------------------------------------+
| tags | %(tags) | Additional custom tags appended to the |
| | | message when _WITH_TAGS macros are |
| | | used. |
+-------------------------+--------------------------+----------------------------------------+
| named_args | %(named_args) | Key-value pairs appended to the |
|                         |                          | message. Only applicable when a        |
|                         |                          | named args log format is used;         |
|                         |                          | remains empty otherwise.               |
+-------------------------+--------------------------+----------------------------------------+
Customising the timestamp
-----------------------------
The timestamp is customisable by:
- Format. Same format specifiers as ``strftime(...)`` format without the additional ``.Qms`` ``.Qus`` ``.Qns`` arguments.
- Local timezone or GMT timezone. Local timezone is used by default.
- Fractional second precision. Using the additional fractional second specifiers in the timestamp format string.
========= ============
Specifier Description
========= ============
%Qms Milliseconds
%Qus Microseconds
%Qns Nanoseconds
========= ============
By default ``"%H:%M:%S.%Qns"`` is used.
.. note:: MinGW does not support all ``strftime(...)`` format specifiers and you might get a ``bad alloc`` if the format specifier is not supported
Setting a custom format for logging to stdout
----------------------------------------------------------
.. code:: cpp
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
Logger
-----------------------------
Logger instances can be created by the user with the desired name, sinks and formatter.
Logger objects are never instantiated directly. Instead, they first have to be created via
:cpp:func:`Frontend::create_or_get_logger(std::string const& logger_name, std::shared_ptr<Sink> sink, std::string const& format_pattern = "%(time) [%(thread_id)] %(short_source_location:<28) LOG_%(log_level:<9) %(logger:<12) %(message)", std::string const& time_pattern = "%H:%M:%S.%Qns", Timezone timestamp_timezone = Timezone::LocalTime, ClockSourceType clock_source = ClockSourceType::Tsc, UserClockSource* user_clock = nullptr)`
:cpp:func:`Frontend::create_or_get_logger(std::string const& logger_name, std::initializer_list<std::shared_ptr<Sink>> sinks, std::string const& format_pattern = "%(time) [%(thread_id)] %(short_source_location:<28) LOG_%(log_level:<9) %(logger:<12) %(message)", std::string const& time_pattern = "%H:%M:%S.%Qns", Timezone timestamp_timezone = Timezone::LocalTime, ClockSourceType clock_source = ClockSourceType::Tsc, UserClockSource* user_clock = nullptr)`
Logger access
-----------------------------
:cpp:func:`Frontend::get_logger(std::string const& name)`
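For example, retrieving a logger that was created earlier under the name ``"root"`` (the name is illustrative):
.. code:: cpp
quill::Logger* logger = quill::Frontend::get_logger("root");
LOG_INFO(logger, "retrieved an existing logger");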
Logger creation
-----------------------------
.. code:: cpp
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
LOG_INFO(logger, "Hello from {}", "library foo");
Avoiding the use of Logger objects
---------------------------------------
For some applications, the use of a single root logger might be enough. In that case, passing the logger to the macro
every time becomes inconvenient. The solution is to store the created Logger as a static variable and create your
own macros. See `example <https://github.com/odygrd/quill/blob/master/examples/recommended_usage/quill_wrapper/include/quill_wrapper/overwrite_macros.h>`_
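A minimal sketch of that pattern (``global_logger`` and the ``APP_LOG_*`` macros are illustrative names, not part of the library):
.. code:: cpp
// Stored once at startup, right after Frontend::create_or_get_logger(...)
inline quill::Logger* global_logger = nullptr;
// Thin wrappers so call sites do not have to pass the logger around
#define APP_LOG_INFO(...) LOG_INFO(global_logger, __VA_ARGS__)
#define APP_LOG_ERROR(...) LOG_ERROR(global_logger, __VA_ARGS__)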
Backtrace Logging
====================
Backtrace logging enables log messages to be stored in a ring buffer and either
- displayed later on demand, or
- displayed automatically when a high severity log message is logged
Backtrace logging needs to be enabled first on the instance of :cpp:class:`quill::LoggerImpl`
.. doxygenfunction:: init_backtrace
.. doxygenfunction:: flush_backtrace
.. note:: Backtrace log messages store the original timestamp of the message. Since they are kept and flushed later, the
timestamps in the log file will be out of order.
.. note:: Backtrace log messages are still pushed to the SPSC queue from the frontend to the backend.
Store messages in the ring buffer and display them when ``LOG_ERROR`` is logged
--------------------------------------------------------------------------------------------------------------------
.. code:: cpp
// Enable the backtrace with a max ring buffer size of 2 messages which will get flushed when
// a LOG_ERROR(...) or higher severity log message occurs via this logger.
// Backtrace has to be enabled only once in the beginning before calling LOG_BACKTRACE(...) for the first time.
logger->init_backtrace(2, quill::LogLevel::Error);
LOG_INFO(logger, "BEFORE backtrace Example {}", 1);
LOG_BACKTRACE(logger, "Backtrace log {}", 1);
LOG_BACKTRACE(logger, "Backtrace log {}", 2);
LOG_BACKTRACE(logger, "Backtrace log {}", 3);
LOG_BACKTRACE(logger, "Backtrace log {}", 4);
// Backtrace is not flushed yet as we requested to flush on errors
LOG_INFO(logger, "AFTER backtrace Example {}", 1);
// log message with severity error - This will also flush_sink the backtrace which has 2 messages
LOG_ERROR(logger, "An error has happened, Backtrace is also flushed.");
// The backtrace is flushed again after LOG_ERROR but in this case it is empty
LOG_ERROR(logger, "An second error has happened, but backtrace is now empty.");
// Log more backtrace messages
LOG_BACKTRACE(logger, "Another Backtrace log {}", 1);
LOG_BACKTRACE(logger, "Another Backtrace log {}", 2);
// Nothing is logged at the moment
LOG_INFO(logger, "Another log info");
// Still nothing logged - the error message is on a different logger object
quill::LoggerImpl* logger_2 = quill::get_logger("example_1_1");
LOG_CRITICAL(logger_2, "A critical error from different logger.");
// The new backtrace is flushed again due to LOG_CRITICAL
LOG_CRITICAL(logger, "A critical error from the logger we had a backtrace.");
Store messages in the ring buffer and display them on demand
--------------------------------------------------------------------------------------------------------------------
.. code:: cpp
// Store maximum of two log messages. By default they will never be flushed since no LogLevel severity is specified
logger->init_backtrace(2);
LOG_INFO(logger, "BEFORE backtrace Example {}", 2);
LOG_BACKTRACE(logger, "Backtrace log {}", 100);
LOG_BACKTRACE(logger, "Backtrace log {}", 200);
LOG_BACKTRACE(logger, "Backtrace log {}", 300);
LOG_INFO(logger, "AFTER backtrace Example {}", 2);
// an error has happened - flush the backtrace manually
logger->flush_backtrace();

View file

@ -0,0 +1,91 @@
.. _usage:
##############################################################################
Usage
##############################################################################
Quickstart
===========
.. code:: cpp
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <string>
#include <utility>
/**
* Trivial logging example to console
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// Change the LogLevel to print everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
LOG_INFO(logger, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger, "This is a log critical example {}", sizeof(std::string));
}
Log to file
======================
.. code:: cpp
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"
#include <utility>
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"trivial_logging.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
// set the log level of the logger to debug (default is info)
logger->set_log_level(quill::LogLevel::Debug);
LOG_INFO(logger, "log something {}", 123);
LOG_DEBUG(logger, "something else {}", 456);
}

View file

@ -0,0 +1,70 @@
.. _users-api:
##############################################################################
User's API
##############################################################################
Backend Options
=====================
.. doxygenstruct:: quill::BackendOptions
:members:
Backend Class
=====================
.. doxygenclass:: quill::Backend
:members:
Frontend Options
=====================
.. doxygenstruct:: quill::FrontendOptions
:members:
Frontend Class
=====================
.. doxygenclass:: quill::FrontendImpl
:members:
Log Levels
=====================
.. doxygenenum:: quill::LogLevel
Logger Class
=====================
.. doxygenclass:: quill::LoggerImpl
:members:
PatternFormatter Class
=========================
.. doxygenclass:: quill::PatternFormatter
:members:
Sink Class
=====================
.. doxygenclass:: quill::Sink
:members:
Filter Class
=====================
.. doxygenclass:: quill::Filter
:members:
FileSinkConfig Class
=====================
.. doxygenclass:: quill::FileSinkConfig
:members:
RotatingFileSinkConfig Class
==============================
.. doxygenclass:: quill::RotatingFileSinkConfig
:members:

View file

@ -0,0 +1,74 @@
add_subdirectory(recommended_usage)
add_subdirectory(advanced)
add_executable(quill_example_backend_thread_notify backend_thread_notify.cpp)
set_common_compile_options(quill_example_backend_thread_notify)
target_link_libraries(quill_example_backend_thread_notify quill)
add_executable(quill_example_backtrace_logging backtrace_logging.cpp)
set_common_compile_options(quill_example_backtrace_logging)
target_link_libraries(quill_example_backtrace_logging quill)
add_executable(quill_example_bounded_dropping_queue_frontend bounded_dropping_queue_frontend.cpp)
set_common_compile_options(quill_example_bounded_dropping_queue_frontend)
target_link_libraries(quill_example_bounded_dropping_queue_frontend quill)
add_executable(quill_example_console_logging console_logging.cpp)
set_common_compile_options(quill_example_console_logging)
target_link_libraries(quill_example_console_logging quill)
add_executable(quill_example_custom_console_colours custom_console_colours.cpp)
set_common_compile_options(quill_example_custom_console_colours)
target_link_libraries(quill_example_custom_console_colours quill)
add_executable(quill_example_rotating_file_logging rotating_file_logging.cpp)
set_common_compile_options(quill_example_rotating_file_logging)
target_link_libraries(quill_example_rotating_file_logging quill)
add_executable(quill_example_signal_handler signal_handler.cpp)
set_common_compile_options(quill_example_signal_handler)
target_link_libraries(quill_example_signal_handler quill)
add_executable(quill_example_logger_removal_with_file_event_notifier logger_removal_with_file_event_notifier.cpp)
set_common_compile_options(quill_example_logger_removal_with_file_event_notifier)
target_link_libraries(quill_example_logger_removal_with_file_event_notifier quill)
add_executable(quill_example_custom_frontend_options custom_frontend_options.cpp)
set_common_compile_options(quill_example_custom_frontend_options)
target_link_libraries(quill_example_custom_frontend_options quill)
add_executable(quill_example_file_logging file_logging.cpp)
set_common_compile_options(quill_example_file_logging)
target_link_libraries(quill_example_file_logging quill)
add_executable(quill_example_filter_logging filter_logging.cpp)
set_common_compile_options(quill_example_filter_logging)
target_link_libraries(quill_example_filter_logging quill)
add_executable(quill_example_system_clock_logging system_clock_logging.cpp)
set_common_compile_options(quill_example_system_clock_logging)
target_link_libraries(quill_example_system_clock_logging quill)
add_executable(quill_example_user_clock_source user_clock_source.cpp)
set_common_compile_options(quill_example_user_clock_source)
target_link_libraries(quill_example_user_clock_source quill)
add_executable(quill_example_user_defined_filter user_defined_filter.cpp)
set_common_compile_options(quill_example_user_defined_filter)
target_link_libraries(quill_example_user_defined_filter quill)
add_executable(quill_example_user_defined_sink user_defined_sink.cpp)
set_common_compile_options(quill_example_user_defined_sink)
target_link_libraries(quill_example_user_defined_sink quill)
add_executable(quill_example_tags_logging tags_logging.cpp)
set_common_compile_options(quill_example_tags_logging)
target_link_libraries(quill_example_tags_logging quill)
add_executable(quill_example_json_sink_logging json_sink_logging.cpp)
set_common_compile_options(quill_example_json_sink_logging)
target_link_libraries(quill_example_json_sink_logging quill)
add_executable(quill_example_user_defined_types_logging user_defined_types_logging.cpp)
set_common_compile_options(quill_example_user_defined_types_logging)
target_link_libraries(quill_example_user_defined_types_logging quill)

View file

@ -0,0 +1,5 @@
add_subdirectory(quill_wrapper)
add_executable(quill_example_advanced advanced.cpp)
set_common_compile_options(quill_example_advanced)
target_link_libraries(quill_example_advanced quill_wrapper_advanced)

View file

@ -0,0 +1,50 @@
/**
* This example showcases passing user-defined types as arguments to the logger, with their
* formatting deferred asynchronously to the backend. It's particularly useful in scenarios where
* string formatting latency is unacceptable and the code operates on the critical path.
*
* For a more straightforward approach, it's generally recommended to pass these types as strings,
* formatting them in the frontend, as demonstrated in the 'user_defined_types_logging.cpp' example.
*/
// Include our wrapper lib for setup_quill
#include "quill_wrapper/quill_wrapper.h"
// Header required for quill::Frontend::get_logger
#include "quill/Frontend.h"
// We need only these two headers in order to log
#include "quill/LogMacros.h"
#include "quill/Logger.h"
// user defined type header
#include "user.h"
// user defined type codec header
#include "user_quill_codec.h"
// Required only when passing std::vector<User> to the logger, to offload the formatting to the backend
#include "quill/std/Vector.h"
int main()
{
setup_quill("recommended_usage.log");
quill::Logger* logger = quill::Frontend::get_logger("root");
User user;
user.name = "Quill";
user.surname = "Library";
user.age = 4;
user.favorite_colors[0] = "red";
user.favorite_colors[1] = "green";
user.favorite_colors[2] = "blue";
LOG_INFO(logger, "The user is {}", user);
std::vector<User> const users = {{"Alice", "Doe", 25, {"red", "green"}},
{"Bob", "Smith", 30, {"blue", "yellow"}},
{"Charlie", "Johnson", 35, {"green", "orange"}},
{"David", "Brown", 40, {"red", "blue", "yellow"}}};
LOG_INFO(logger, "The users are {}", users);
}

View file

@ -0,0 +1,16 @@
set(LIB_NAME quill_wrapper_advanced)
add_library(${LIB_NAME} STATIC
include/quill_wrapper/quill_wrapper.h
include/quill_wrapper/quill_wrapper.cpp)
# Add include directories for this library
target_include_directories(${LIB_NAME}
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR})
# Link quill dependency
target_link_libraries(${LIB_NAME} PUBLIC quill::quill)

View file

@ -0,0 +1,21 @@
#include "quill_wrapper.h"
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
void setup_quill(char const* log_file)
{
// Start the backend thread
quill::Backend::start();
// Setup sink and logger
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("console_sink");
// Create and store the logger
quill::Frontend::create_or_get_logger("root", std::move(console_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
}

View file

@ -0,0 +1,3 @@
#pragma once
void setup_quill(char const* log_file);

View file

@ -0,0 +1,16 @@
#pragma once
#include <array>
#include <cstdint>
#include <string>
/**
* User defined type
*/
struct User
{
std::string name;
std::string surname;
uint32_t age;
std::array<std::string, 3> favorite_colors;
};

View file

@ -0,0 +1,85 @@
#pragma once
// Always required
#include "quill/core/Codec.h"
#include "quill/core/DynamicFormatArgStore.h"
// To serialise the std::array member of User you need Array.h; otherwise you don't need to include this
#include "quill/std/Array.h"
#include "user.h"
#include <utility> // for declval, only required if you do the decoding manually and use declval
/***/
template <>
struct fmtquill::formatter<User>
{
template <typename FormatContext>
constexpr auto parse(FormatContext& ctx)
{
return ctx.begin();
}
template <typename FormatContext>
auto format(::User const& user, FormatContext& ctx) const
{
return fmtquill::format_to(ctx.out(), "Name: {}, Surname: {}, Age: {}, Favorite Colors: {}",
user.name, user.surname, user.age, user.favorite_colors);
}
};
/***/
template <>
struct quill::detail::ArgSizeCalculator<User>
{
static size_t calculate(std::vector<size_t>& conditional_arg_size_cache, ::User const& user) noexcept
{
// pass as arguments the class members you want to serialize
return calculate_total_size(conditional_arg_size_cache, user.name, user.surname, user.age, user.favorite_colors);
}
};
/***/
template <>
struct quill::detail::Encoder<User>
{
static void encode(std::byte*& buffer, std::vector<size_t> const& conditional_arg_size_cache,
uint32_t& conditional_arg_size_cache_index, ::User const& user) noexcept
{
// You must encode the same members and in the same order as in the ArgSizeCalculator::calculate
encode_members(buffer, conditional_arg_size_cache, conditional_arg_size_cache_index, user.name,
user.surname, user.age, user.favorite_colors);
}
};
/***/
template <>
struct quill::detail::Decoder<User>
{
static ::User decode(std::byte*& buffer, DynamicFormatArgStore* args_store)
{
// You must decode the same members and in the same order as in the Encoder::encode
::User user;
decode_and_assign_members(buffer, args_store, user, user.name, user.surname, user.age, user.favorite_colors);
return user;
// note:
// If the object is not default constructible you have to do it manually without
// decode_members helper
// auto name = Decoder<decltype(std::declval<::User>().name)>::decode(buffer, nullptr);
// auto surname = Decoder<decltype(std::declval<::User>().surname)>::decode(buffer, nullptr);
// auto age = Decoder<decltype(std::declval<::User>().age)>::decode(buffer, nullptr);
// auto favorite_colors = Decoder<decltype(std::declval<::User>().favorite_colors)>::decode(buffer, nullptr);
// ::User user{name, surname, age, favorite_colors};
// if (args_store)
// {
// args_store->push_back(user);
// }
// return user;
}
};

View file

@ -0,0 +1,59 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <chrono>
#include <iostream>
#include <thread>
#include <utility>
/**
* This example demonstrates how to manually wake up the backend thread when configuring
* it with a longer sleep duration.
*
* Note: It's generally advised not to set the sleep duration higher than 1 second.
* However, it's still possible to do so.
* The only practical use case for this is when you want to prevent the backend thread
* from waking up periodically to conserve CPU cycles.
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
backend_options.sleep_duration = std::chrono::hours{24};
quill::Backend::start(backend_options);
// To demonstrate the example we have to wait for the backend thread to start and then
// go into sleep as there will be nothing to log, so we wait here a bit
std::this_thread::sleep_for(std::chrono::seconds{1});
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
LOG_INFO(logger, "This is a log info example {}", 5);
LOG_WARNING(logger, "This is a log warning example {}", 6);
LOG_ERROR(logger, "This is a log error example {}", 7);
// sleep for 5 seconds, the backend thread will also be sleeping so no logs are displayed
std::cout << "waiting for the backend thread..." << std::endl;
std::this_thread::sleep_for(std::chrono::seconds{5});
std::cout << "notifying for the backend thread..." << std::endl;
quill::Backend::notify();
std::this_thread::sleep_for(std::chrono::seconds{1}); // let backend sleep again
LOG_INFO(logger, "This is a log info example {}", 15);
LOG_WARNING(logger, "This is a log warning example {}", 16);
LOG_ERROR(logger, "This is a log error example {}", 17);
std::cout << "waiting for the backend thread..." << std::endl;
std::this_thread::sleep_for(std::chrono::seconds{5});
std::cout << "notifying for the backend thread..." << std::endl;
quill::Backend::notify();
std::this_thread::sleep_for(std::chrono::seconds{1}); // let backend sleep again
LOG_INFO(logger, "Done, backend thread will always wake up and log on destruction");
}

View file

@ -0,0 +1,54 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <utility>
/**
* This example showcases the usage of LOG_BACKTRACE macros. Log messages generated using these
* macros are enqueued from the frontend to the backend. However, the backend will only log them if
* a specific condition is met or if they are manually flushed using flush_backtrace().
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
logger->set_log_level(quill::LogLevel::Debug);
// Enable the backtrace with a ring buffer capacity of 2 messages to get flushed when
// a LOG_ERROR(...) or higher severity log message occurs via this logger.
// Backtrace has to be enabled only once in the beginning before calling LOG_BACKTRACE(...) for the first time.
logger->init_backtrace(2u, quill::LogLevel::Error);
LOG_INFO(logger, "Begin example {}", 1);
LOG_BACKTRACE(logger, "Backtrace log {}", 1);
LOG_BACKTRACE(logger, "Backtrace log {}", 2);
LOG_BACKTRACE(logger, "Backtrace log {}", 3);
LOG_BACKTRACE(logger, "Backtrace log {}", 4);
LOG_INFO(logger, "Backtrace is not flushed yet as we requested to flush on errors {}", 1);
// log message with severity error - This will also flush the backtrace which has 2 messages
LOG_ERROR(logger, "An error has happened, Backtrace is also flushed.");
// The backtrace is flushed again after LOG_ERROR but in this case it is empty
LOG_ERROR(logger,
"Another second error has happened, backtrace is flushed again but it is now empty.");
// Log more backtrace messages
LOG_BACKTRACE(logger, "Another Backtrace log {}", 1);
LOG_BACKTRACE(logger, "Another Backtrace log {}", 2);
LOG_DEBUG(logger, "No backtrace logs yet");
LOG_INFO(logger, "Manually flush backtrace");
logger->flush_backtrace();
}

View file

@ -0,0 +1,53 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <cstdint>
#include <utility>
/**
* This example demonstrates how to change the type of the Single Producer Single Consumer (SPSC)
* queue used by the frontend.
*
* By default, the library uses an UnboundedBlocking queue, which starts small with
* initial_queue_capacity and reallocates up to 2GB as needed.
*/
/**
* Create a custom frontend config
*/
struct CustomFrontendOptions
{
// Set the queue to BoundedDropping
static constexpr quill::QueueType queue_type = quill::QueueType::BoundedDropping;
// Set small capacity to demonstrate dropping messages in this example
static constexpr uint32_t initial_queue_capacity = 256;
static constexpr uint32_t blocking_queue_retry_interval_ns = 800;
static constexpr bool huge_pages_enabled = false;
};
/**
* A new Frontend and Logger should be defined to use the custom frontend options.
*/
using CustomFrontend = quill::FrontendImpl<CustomFrontendOptions>;
using CustomLogger = quill::LoggerImpl<CustomFrontendOptions>;
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = CustomFrontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
CustomLogger* logger = CustomFrontend::create_or_get_logger("root", std::move(console_sink));
for (int i = 0; i < 32; ++i)
{
LOG_INFO(logger, "Bounded queue example log message num {}", i);
}
}
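// Expected behaviour (hedged editorial note): with a BoundedDropping queue and the deliberately
// tiny initial_queue_capacity of 256 (bytes), whatever does not fit while the backend catches up
// is dropped, so only a subset of the 32 messages above is typically printed; quill's backend
// normally reports the number of dropped messages.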

View file

@ -0,0 +1,59 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <string>
#include <utility>
/**
* Trivial logging example to console
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// Change the LogLevel to print everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
LOG_INFO(logger, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger, "This is a log critical example {}", sizeof(std::string));
// libfmt format specification mini language is supported
// note: named arguments are not supported
LOG_INFO(logger, "Support for int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}", 42);
LOG_INFO(logger, "Easy padding in numbers like {:08d}", 12);
LOG_INFO(logger, "{:>30}", "right aligned");
LOG_INFO(logger, "Positional arguments {1} {2} {0} ", "too", "are", "supported");
LOG_INFO(logger, "Support for precision {:.4f}", 1.23456);
// To log with a different format to the same sink, just create another logger
auto console_sink_2 = quill::Frontend::get_sink("sink_id_1"); // get the created sink
quill::Logger* logger_2 = quill::Frontend::create_or_get_logger(
"logger_2", std::move(console_sink_2), "%(time) %(log_level:<9) %(logger:<12) %(message)");
logger_2->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger_2, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger_2, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger_2, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger_2, "This is a log debug example {}", 4);
LOG_INFO(logger_2, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger_2, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger_2, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger_2, "This is a log critical example {}", sizeof(std::string));
}

View file

@ -0,0 +1,42 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <string>
#include <utility>
/**
* The example demonstrates how to customise the console colours
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
quill::ConsoleColours custom_console_colours;
custom_console_colours.set_default_colours();
custom_console_colours.set_colour(quill::LogLevel::Info, quill::ConsoleColours::blue); // overwrite the colour for INFO
// Create the sink
auto console_sink =
quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1", custom_console_colours);
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// Change the LogLevel to print everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
LOG_INFO(logger, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger, "This is a log critical example {}", sizeof(std::string));
}

View file

@ -0,0 +1,47 @@
/**
* This example demonstrates defining and utilizing custom FrontendOptions.
* It's useful when you need to modify the queue type or capacity.
* FrontendOptions are compile-time options and must be passed as a template argument.
*/
// Backend - required to start the backend thread
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/sinks/ConsoleSink.h"
// Include only Logger, LogMacros as they are the minimal required files for logging functionality
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include <utility>
// define your own FrontendOptions, see "core/FrontendOptions.h" for details
struct CustomFrontendOptions
{
static constexpr quill::QueueType queue_type = quill::QueueType::BoundedDropping;
static constexpr uint32_t initial_queue_capacity = 131'072;
static constexpr uint32_t blocking_queue_retry_interval_ns = 800;
static constexpr bool huge_pages_enabled = false;
};
// To utilize our custom FrontendOptions, we define a Frontend class using CustomFrontendOptions
using CustomFrontend = quill::FrontendImpl<CustomFrontendOptions>;
// The Logger type must also be defined
using CustomLogger = quill::LoggerImpl<CustomFrontendOptions>;
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options); // or quill::Backend::start_with_signal_handler<CustomFrontendOptions>();
// All frontend operations must utilize CustomFrontend instead of quill::Frontend
auto console_sink = CustomFrontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
CustomLogger* logger = CustomFrontend::create_or_get_logger("root", std::move(console_sink));
// log something
LOG_INFO(logger, "This is a log info example {}", 123);
LOG_WARNING(logger, "This is a log warning example {}", 123);
}

View file

@ -0,0 +1,61 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"
#include <utility>
/**
* Logging to the same file using multiple loggers
*/
void log_from_new_logger()
{
// Obtain the existing created sink, and create a new logger to log
auto file_sink = quill::Frontend::get_sink("example_file_logging.log");
quill::Logger* logger_2 = quill::Frontend::create_or_get_logger("root", std::move(file_sink));
LOG_INFO(logger_2, "log from new logger {}", 123);
}
void log_from_existing_logger()
{
// Obtain existing logger to log
quill::Logger* logger = quill::Frontend::get_logger("root");
LOG_INFO(logger, "log again {}", 123312);
}
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
"example_file_logging.log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
// set the log level of the logger to debug (default is info)
logger->set_log_level(quill::LogLevel::Debug);
LOG_INFO(logger, "log something {}", 123);
LOG_DEBUG(logger, "something else {}", 456);
log_from_new_logger();
log_from_existing_logger();
}

View file

@ -0,0 +1,41 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <string>
#include <utility>
/**
* This example demonstrates the usage of the built-in filter, which can filter log levels.
* When a filter is applied, log messages are still enqueued from the frontend to the backend.
* Subsequently, the backend dynamically filters them based on a given condition.
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
// Set a filter for the sink
console_sink->set_log_level_filter(quill::LogLevel::Info);
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// Change the LogLevel to send everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
LOG_INFO(logger, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger, "This is a log critical example {}", sizeof(std::string));
}

View file

@ -0,0 +1,63 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include "quill/sinks/JsonFileSink.h"
#include <utility>
/**
* This example showcases the usage of the JsonFileSink to generate JSON-formatted logs.
* Additionally, it demonstrates how to simultaneously log in both the standard logger output
* format, e.g., to console and the corresponding JSON format to a JSON output sink.
*
* For successful JSON logging, it's essential to use named placeholders within the provided
* format string, such as "{method}" and "{endpoint}".
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
// Create a json file for output
auto json_sink = quill::Frontend::create_or_get_sink<quill::JsonFileSink>(
"json_sink_logging.log",
[]()
{
quill::JsonFileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
// When using the JsonFileSink, it is ideal to set the logging pattern to empty to avoid unnecessary message formatting.
quill::Logger* json_logger = quill::Frontend::create_or_get_logger(
"json_logger", std::move(json_sink), "", "%H:%M:%S.%Qns", quill::Timezone::GmtTime);
for (int i = 0; i < 2; ++i)
{
LOG_INFO(json_logger, "{method} to {endpoint} took {elapsed} ms", "POST", "http://", 10 * i);
}
// It is also possible to create a logger that logs to both the json file and stdout
// with the appropriate format
auto json_sink_2 = quill::Frontend::get_sink("json_sink_logging.log");
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("console_sink_id_1");
// We set a custom format pattern here to also include the named_args
quill::Logger* hybrid_logger = quill::Frontend::create_or_get_logger(
"hybrid_logger", {std::move(json_sink_2), std::move(console_sink)},
"%(time) [%(thread_id)] %(short_source_location:<28) LOG_%(log_level:<9) %(logger:<20) "
"%(message) [%(named_args)]");
for (int i = 2; i < 4; ++i)
{
LOG_INFO(hybrid_logger, "{method} to {endpoint} took {elapsed} ms", "POST", "http://", 10 * i);
}
}
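// Illustrative only: each record written by the JsonFileSink is a single JSON object whose fields
// include the timestamp/log level/logger metadata plus the named placeholders from the format
// string ("method", "endpoint", "elapsed"); the exact key names depend on the sink configuration.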

View file

@ -0,0 +1,92 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
#include <utility>
/**
* This example demonstrates the creation and removal of a logger object for each instance of the Session class.
* Each session instance logs to a new log file using a unique logger.
* When the session ends, the logger is removed, and the associated file is closed.
* Additionally, it showcases the usage of the FileEventNotifier, which provides notifications for file changes.
*/
class Session
{
public:
explicit Session(std::string const& unique_name)
{
// Set up a FileEventNotifier so we are notified on file changes
quill::FileEventNotifier file_notifier;
file_notifier.before_open = [](quill::fs::path const& filename)
{ std::cout << "file_notifier - preparing to open file " << filename << std::endl; };
file_notifier.after_open = [](quill::fs::path const& filename, FILE* f)
{ std::cout << "file_notifier - opened file " << filename << std::endl; };
file_notifier.before_close = [](quill::fs::path const& filename, FILE* f)
{ std::cout << "file_notifier - preparing to close file " << filename << std::endl; };
file_notifier.after_close = [](quill::fs::path const& filename)
{ std::cout << "file_notifier - closed file " << filename << std::endl; };
// Create a new log file for this session
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
std::string{"session_"} + unique_name + ".log",
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::None);
return cfg;
}(),
file_notifier);
// Create a session specific logger for the current session
_logger = quill::Frontend::create_or_get_logger(unique_name, std::move(file_sink));
LOG_INFO(_logger, "Hello from session {}", unique_name);
}
~Session()
{
// Remove the logger when the session is done. That will also remove the associated file_handler
// and close the file as long as no other logger is using that file_handler
quill::Frontend::remove_logger(_logger);
}
private:
quill::Logger* _logger{nullptr};
};
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
{
Session session_1 = Session{"SessionA"};
std::this_thread::sleep_for(std::chrono::seconds{3});
Session session_2 = Session{"SessionB"};
std::this_thread::sleep_for(std::chrono::seconds{3});
}
std::this_thread::sleep_for(std::chrono::seconds{3});
{
Session session_3 = Session{"SessionC"};
std::this_thread::sleep_for(std::chrono::seconds{3});
}
std::this_thread::sleep_for(std::chrono::seconds{3});
Session session_4 = Session{"SessionD"};
std::this_thread::sleep_for(std::chrono::seconds{3});
}

View file

@ -0,0 +1,9 @@
add_subdirectory(quill_wrapper)
add_executable(quill_example_recommended_usage recommended_usage.cpp)
set_common_compile_options(quill_example_recommended_usage)
target_link_libraries(quill_example_recommended_usage quill_wrapper_recommended)
add_executable(quill_example_use_overwrite_macros use_overwrite_macros.cpp)
set_common_compile_options(quill_example_use_overwrite_macros)
target_link_libraries(quill_example_use_overwrite_macros quill_wrapper_recommended)

View file

@ -0,0 +1,17 @@
set(LIB_NAME quill_wrapper_recommended)
add_library(${LIB_NAME} STATIC
include/quill_wrapper/overwrite_macros.h
include/quill_wrapper/quill_wrapper.h
include/quill_wrapper/quill_wrapper.cpp)
# Add include directories for this library
target_include_directories(${LIB_NAME}
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR})
# Link quill dependency
target_link_libraries(${LIB_NAME} PUBLIC quill::quill)

View file

@ -0,0 +1,28 @@
#pragma once
/**
* This file is optional and demonstrates how users can replace the library macros with their own.
*
* It becomes useful when opting for a single Logger object over several logger objects.
*
* However, using multiple logger objects provides greater flexibility in applications with multiple
* components. With multiple loggers, it's easier to control the log level of each component
* individually, whereas using a single logger, as shown in this example, may present limitations.
*/
/**
* By defining QUILL_DISABLE_NON_PREFIXED_MACROS before including LogMacros, we disable the
* default 'LOG_' and then create our own macros using the global logger.
*/
#define QUILL_DISABLE_NON_PREFIXED_MACROS
#include "quill/LogMacros.h"
#include "quill/Logger.h"
// The logger we defined in quill_wrapper.cpp
extern quill::Logger* global_logger_a;
// Define custom log macros using global_logger_a. Two examples are provided here for demonstration.
#define LOG_INFO(fmt, ...) QUILL_LOG_INFO(global_logger_a, fmt, ##__VA_ARGS__)
#define LOG_WARNING(fmt, ...) QUILL_LOG_WARNING(global_logger_a, fmt, ##__VA_ARGS__)
// etc ..

View file

@ -0,0 +1,35 @@
#include "quill_wrapper.h"
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/Logger.h"
#include "quill/sinks/FileSink.h"
// Define a global variable for a logger to avoid looking up the logger each time.
// Additional global variables can be defined for additional loggers if needed.
quill::Logger* global_logger_a;
void setup_quill(char const* log_file)
{
// Start the backend thread
quill::Backend::start();
// Setup sink and logger
auto file_sink = quill::Frontend::create_or_get_sink<quill::FileSink>(
log_file,
[]()
{
quill::FileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
return cfg;
}(),
quill::FileEventNotifier{});
// Create and store the logger
global_logger_a =
quill::Frontend::create_or_get_logger("root", std::move(file_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
}

View file

@ -0,0 +1,3 @@
#pragma once
void setup_quill(char const* log_file);

View file

@ -0,0 +1,49 @@
/**
* This example demonstrates the recommended setup for the Quill library.
*
* It is advisable to encapsulate the header-only library into a static library, which
* you build once and link to your main application.
* This library should include `quill/backend` in the .cpp files.
*
* In your application, include only the following headers for logging:
*
* - For logger lookup or creation:
* #include "quill/Frontend.h"
*
* - For sink creation:
* #include "quill/sinks/.."
*
* - For logging:
* #include "quill/Logger.h"
* #include "quill/LogMacros.h"
*/
// Include our wrapper lib
#include "quill_wrapper/quill_wrapper.h"
// We need only those two headers in order to log
#include "quill/LogMacros.h"
#include "quill/Logger.h"
// We utilize the global_logger_a from the quill_wrapper library.
// The use of a global logger is optional.
// Alternatively, we could include "quill/Frontend.h" and use `quill::Frontend::get_logger(..)`
// to obtain the created logger, or we could store it as a class member.
extern quill::Logger* global_logger_a;
int main()
{
setup_quill("recommended_usage.log");
// Change the LogLevel to print everything
global_logger_a->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(global_logger_a, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(global_logger_a, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(global_logger_a, "This is a log trace l1 {} example", "string");
LOG_DEBUG(global_logger_a, "This is a log debug example {}", 4);
LOG_INFO(global_logger_a, "This is a log info example {}", 5);
LOG_WARNING(global_logger_a, "This is a log warning example {}", 6);
LOG_ERROR(global_logger_a, "This is a log error example {}", 7);
LOG_CRITICAL(global_logger_a, "This is a log critical example {}", 118);
}

View file

@ -0,0 +1,13 @@
#include "quill_wrapper/overwrite_macros.h"
#include "quill_wrapper/quill_wrapper.h"
int main()
{
setup_quill("use_overwrite_macros.log");
// Change the LogLevel to print everything
global_logger_a->set_log_level(quill::LogLevel::TraceL3);
LOG_INFO("This is a log info example {}", 5);
LOG_WARNING("This is a log warning example {}", 6);
}

View file

@ -0,0 +1,44 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/RotatingFileSink.h"
#include <utility>
/**
* This example demonstrates how to create a RotatingFileSink with daily rotation and automatic rotation based on maximum file size.
* For additional configuration options, refer to RotatingFileSinkConfig.
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto rotating_file_sink = quill::Frontend::create_or_get_sink<quill::RotatingFileSink>(
"rotating_file.log",
[]()
{
// See RotatingFileSinkConfig for more options
quill::RotatingFileSinkConfig cfg;
cfg.set_open_mode('w');
cfg.set_filename_append_option(quill::FilenameAppendOption::StartDateTime);
cfg.set_rotation_time_daily("18:30");
cfg.set_rotation_max_file_size(1024); // small value to demonstrate the example
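// With a 1 KiB limit, the 20 messages logged below should roll the file over several times;
// rotated files are typically suffixed with an index (e.g. rotating_file.1.log). This is a
// hedged note; see RotatingFileSinkConfig for the exact naming and backup behaviour.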
return cfg;
}());
quill::Logger* logger =
quill::Frontend::create_or_get_logger("root", std::move(rotating_file_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) "
"LOG_%(log_level:<9) %(logger:<12) %(message)",
"%H:%M:%S.%Qns", quill::Timezone::GmtTime);
for (int i = 0; i < 20; ++i)
{
LOG_INFO(logger, "Hello from rotating logger, index is {}", i);
}
}

View file

@ -0,0 +1,90 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <chrono>
#include <csignal>
#include <cstdint>
#include <thread>
#include <utility>
#include <vector>
void cause_segfault(quill::Logger* logger)
{
LOG_INFO(logger, "Crashing");
int* p = (int*)0x12345678;
*p = 0;
}
void infinite_loop(quill::Logger* logger)
{
LOG_INFO(logger, "Crashing");
/* break out with ctrl+c to test SIGINT handling */
while (1)
{
};
}
void illegal_instruction(quill::Logger* logger)
{
LOG_INFO(logger, "Crashing");
raise(SIGILL);
}
/**
* Signal handler example
* The signal handler flushes the log when the application crashes or gets terminated
*/
int main()
{
#if defined(_WIN32)
// NOTE: On windows a signal handler must be installed on each new thread
quill::init_signal_handler<quill::FrontendOptions>();
#endif
// Start the logging backend thread with a signal handler
// On Linux/macOS a single signal handler is installed to handle POSIX-style signals
// On Windows an exception handler and a Ctrl-C handler are installed.
quill::Backend::start_with_signal_handler<quill::FrontendOptions>();
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// create threads that will cause a segfault
std::vector<std::thread> threads;
for (size_t i = 0; i < 4; ++i)
{
threads.emplace_back(
[logger]()
{
#if defined(_WIN32)
// NOTE: On windows the signal handler must be installed on each new thread
quill::init_signal_handler<quill::FrontendOptions>();
#endif
// sleep for 1 second so all threads are ready
std::this_thread::sleep_for(std::chrono::seconds{1});
for (size_t i = 0; i < 10; ++i)
{
// log 10 messages
LOG_INFO(logger, "Log from thread {}", i);
}
// After 10 messages Crash - Uncomment any of the below :
// illegal_instruction(logger);
// cause_segfault(logger);
});
}
for (uint32_t cnt{0}; cnt < 1000; ++cnt)
{
std::this_thread::sleep_for(std::chrono::milliseconds{300});
LOG_INFO(logger, "Log from main {}", cnt);
}
}

View file

@ -0,0 +1,39 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <string>
#include <utility>
/**
* Trivial logging example to console using std::chrono::now for timestamps instead of the
* default rdtsc clock
*/
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger(
"root", std::move(console_sink), "%(time) [%(process_id)] [%(thread_id)] %(logger) - %(message)",
"%D %H:%M:%S.%Qms %z", quill::Timezone::GmtTime, quill::ClockSourceType::System);
// Change the LogLevel to print everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
LOG_INFO(logger, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger, "This is a log critical example {}", sizeof(std::string));
}

View file

@ -0,0 +1,97 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/Utility.h"
#include "quill/sinks/ConsoleSink.h"
#include <cstdint>
#include <string>
#include <string_view>
#include <utility>
/**
* This example showcases the usage of the _WITH_TAGS macros, which allow for logging additional
* tags with each log message.
*
* The example demonstrates the construction of custom tags at compile time using
 * the quill::Tags interface.
 *
 * One important limitation to note is that quill::Tags instances must be constructed
 * at compile time.
*/
class MyTagsA : public quill::Tags
{
public:
constexpr MyTagsA(char const* tag_a, uint32_t tag_b) : _tag_a(tag_a), _tag_b(tag_b) {}
void format(std::string& out) const override
{
out.append(_tag_a);
out.append(":");
out.append(std::to_string(_tag_b));
}
private:
char const* _tag_a;
uint32_t _tag_b;
};
// Define another CustomTags class
class MyTagsB : public quill::Tags
{
public:
constexpr MyTagsB(char const* tag_a, uint32_t tag_b) : _tag_a(tag_a), _tag_b(tag_b) {}
void format(std::string& out) const override
{
out.append(_tag_a);
out.append(":");
out.append(std::to_string(_tag_b));
}
private:
char const* _tag_a;
uint32_t _tag_b;
};
static constexpr MyTagsA tags_a{"CUSTOM_TAG_A", 12};
static constexpr MyTagsB tags_b{"CUSTOM_TAG_B", 23};
// Combine different tags
static constexpr quill::utility::CombinedTags<MyTagsA, MyTagsB> tags_ab{tags_a, tags_b};
static constexpr quill::utility::CombinedTags<MyTagsA, MyTagsB> tags_ab_custom_format_delimiter{
tags_a, tags_b, " -- "};
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
// It is important to change the default logging pattern to include the custom tags
quill::Logger* logger = quill::Frontend::create_or_get_logger(
"root", std::move(console_sink),
"%(time) [%(thread_id)] %(short_source_location:<28) %(log_level:<9) %(logger:<16) "
"[%(custom_tags)] %(message)",
"%Y-%m-%d %H:%M:%S.%Qms", quill::Timezone::GmtTime);
LOG_TRACE_L3_WITH_TAGS(logger, tags_a, "TraceL3 with custom tags");
LOG_TRACE_L2_WITH_TAGS(logger, tags_b, "TraceL2 with custom tags");
LOG_TRACE_L1_WITH_TAGS(logger, tags_b, "TraceL1 with custom tags");
LOG_DEBUG_WITH_TAGS(logger, tags_a, "Debug with custom tags");
LOG_INFO_WITH_TAGS(logger, tags_a, "Info with custom tags");
LOG_WARNING_WITH_TAGS(logger, tags_b, "Warning with custom tags");
LOG_ERROR_WITH_TAGS(logger, tags_a, "Error with custom tags");
LOG_INFO_WITH_TAGS(logger, tags_ab, "Info with combined custom tags");
LOG_INFO_WITH_TAGS(logger, tags_ab_custom_format_delimiter,
"Combined custom tags custom delimiter");
LOG_INFO(logger, "Without custom tags");
}

View file

@ -0,0 +1,81 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/UserClockSource.h"
#include "quill/sinks/ConsoleSink.h"
#include <atomic>
#include <chrono>
#include <cstdint>
#include <utility>
/**
* Example demonstrating custom clock usage for logging. This is particularly useful
* when simulating events from a specific time period, allowing logs to align with
* the simulated time.
*/
/**
 * This class needs to be thread-safe, unless only a single thread in the application is calling
 * LOG_ macros with the same logger
*/
class SimulatedClock : public quill::UserClockSource
{
public:
SimulatedClock() = default;
/**
 * Required by UserClockSource
* @return current time now, in nanoseconds since epoch
*/
uint64_t now() const override { return _timestamp_ns.load(std::memory_order_relaxed); }
/**
* set custom timestamp
* @param time_since_epoch timestamp
*/
void set_timestamp(std::chrono::seconds time_since_epoch)
{
// always convert to nanos
_timestamp_ns.store(static_cast<uint64_t>(std::chrono::nanoseconds{time_since_epoch}.count()),
std::memory_order_relaxed);
}
private:
std::atomic<uint64_t> _timestamp_ns{0}; // time since epoch - must always be in nanoseconds
};
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Create a simulated timestamp class. Quill takes a pointer to this class,
// and the user is responsible for its lifetime.
// Ensure that the instance of this class remains alive for as long as the logger
// object exists, until the logger is removed.
SimulatedClock simulated_clock;
// Get the console sink and also create a logger using the simulated clock
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger(
"root", std::move(console_sink),
"%(time) %(short_source_location:<28) LOG_%(log_level:<9) %(logger:<12) %(message)",
"%D %H:%M:%S.%Qns", quill::Timezone::LocalTime, quill::ClockSourceType::User, &simulated_clock);
// Change the LogLevel to print everything
logger->set_log_level(quill::LogLevel::TraceL3);
// Set our timestamp to Sunday 12 June 2022
simulated_clock.set_timestamp(std::chrono::seconds{1655007309});
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
// update our timestamp
simulated_clock.set_timestamp(std::chrono::seconds{1655039000});
LOG_INFO(logger, "This is a log info {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
}

View file

@ -0,0 +1,58 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/filters/Filter.h"
#include "quill/sinks/ConsoleSink.h"
#include <cstdint>
#include <string>
#include <string_view>
#include <utility>
/**
 * This example demonstrates the creation and usage of a user-defined filter.
* When a filter is applied, log messages are still enqueued from the frontend to the backend.
* Subsequently, the backend dynamically filters them based on a given condition.
*/
class UserFilter : public quill::Filter
{
public:
UserFilter() : quill::Filter("filter_1"){};
bool filter(quill::MacroMetadata const* log_metadata, uint64_t log_timestamp,
std::string_view thread_id, std::string_view thread_name, std::string_view logger_name,
quill::LogLevel log_level, std::string_view log_message) noexcept override
{
// for example filter out lines 47 and 48 of any file
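// Returning true keeps the message (the backend writes it to the sink); returning false drops it.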
return (std::stoi(log_metadata->line()) != 47) && (std::stoi(log_metadata->line()) != 48);
}
};
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
// Add the filter - adding filters is thread safe and can be called anytime
console_sink->add_filter(std::make_unique<UserFilter>());
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
// Change the LogLevel to send everything
logger->set_log_level(quill::LogLevel::TraceL3);
LOG_TRACE_L3(logger, "This is a log trace l3 example {}", 1);
LOG_TRACE_L2(logger, "This is a log trace l2 example {} {}", 2, 2.3);
LOG_TRACE_L1(logger, "This is a log trace l1 {} example", "string");
LOG_DEBUG(logger, "This is a log debug example {}", 4);
LOG_INFO(logger, "This is a log info example {}", sizeof(std::string));
LOG_WARNING(logger, "This is a log warning example {}", sizeof(std::string));
LOG_ERROR(logger, "This is a log error example {}", sizeof(std::string));
LOG_CRITICAL(logger, "This is a log critical example {}", sizeof(std::string));
}

View file

@ -0,0 +1,77 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/Sink.h"
#include <cstdint>
#include <iostream>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
/**
* This example demonstrates how to implement a custom Sink
*/
class UserSink final : public quill::Sink
{
public:
UserSink() = default;
/***/
void write_log_message(quill::MacroMetadata const* log_metadata, uint64_t log_timestamp,
std::string_view thread_id, std::string_view thread_name,
std::string_view logger_name, quill::LogLevel log_level,
std::vector<std::pair<std::string, std::string>> const* named_args,
std::string_view log_message) override
{
// Called by the logger backend worker thread for each LOG_* macro
// last character is '\n' and we exclude it using size() - 1
_messages.push_back(std::string{log_message.data(), log_message.size() - 1});
}
/***/
void flush_sink() noexcept override
{
// This is not called for each LOG_* invocation like the write function; instead it is called
// periodically, when there are no more LOG_* writes left to process, or when logger->flush_log() is invoked
for (auto const& message : _messages)
{
std::cout << message << std::endl;
}
_messages.clear();
}
/***/
void run_periodic_tasks() noexcept override
{
// Executes periodic user-defined tasks. This function is frequently invoked by the backend thread's main loop.
// Avoid including heavy tasks here to prevent slowing down the backend thread.
}
private:
std::vector<std::string> _messages;
};
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto file_sink = quill::Frontend::create_or_get_sink<UserSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(file_sink));
logger->set_log_level(quill::LogLevel::Debug);
LOG_INFO(logger, "Hello from {}", "sink example");
LOG_DEBUG(logger, "Invoking user sink flush");
logger->flush_log();
LOG_INFO(logger, "Log more {}", 123);
}

View file

@ -0,0 +1,63 @@
#include "quill/Backend.h"
#include "quill/Frontend.h"
#include "quill/LogMacros.h"
#include "quill/Logger.h"
#include "quill/sinks/ConsoleSink.h"
#include <cstdint>
#include <sstream>
#include <string>
#include <utility>
/**
* This example illustrates logging user-defined types.
*
* Starting from version 4.0.0, direct passing of user-defined and standard library types to the
* 'LOG_' macros is no longer supported. Instead, these types must be converted to strings
* before being passed to the logger.
*
* In this example, std::ostringstream is used for simplicity, but it's recommended to use modern
* formatting libraries such as std::format or fmt::format.
*/
class User
{
public:
User(std::string name, std::string surname, uint32_t age)
: name(std::move(name)), surname(std::move(surname)), age(age){};
friend std::ostream& operator<<(std::ostream& os, User const& obj)
{
os << "name: " << obj.name << ", surname: " << obj.surname << ", age: " << obj.age;
return os;
}
private:
std::string name;
std::string surname;
uint32_t age;
};
// Helper to convert to string
template <typename T>
std::string to_string(T const& obj)
{
std::ostringstream oss;
oss << obj;
return oss.str();
}
int main()
{
// Start the backend thread
quill::BackendOptions backend_options;
quill::Backend::start(backend_options);
// Frontend
auto console_sink = quill::Frontend::create_or_get_sink<quill::ConsoleSink>("sink_id_1");
quill::Logger* logger = quill::Frontend::create_or_get_logger("root", std::move(console_sink));
User user_1{"Super", "User", 1};
LOG_INFO(logger, "User is [{}]", to_string(user_1));
}
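// Alternative sketch (assumption, not part of the original example): with the fmt library >= 9,
// the existing ostream operator can be reused directly via fmt::streamed from <fmt/ostream.h>, e.g.
//   LOG_INFO(logger, "User is [{}]", fmt::format("{}", fmt::streamed(user_1)));
// which avoids the hand-written to_string helper.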

View file

@ -0,0 +1,21 @@
project('quill', 'cpp', version : '4.2.0', default_options : ['warning_level=3', 'cpp_std=c++17'])
inc_dirs = include_directories('quill/include', is_system: true)
# Conditional compiler arguments for specific compiler
interface_compile_args = meson.get_compiler('cpp').get_supported_arguments('-Wno-gnu-zero-variadic-macro-arguments')
quill_dep = declare_dependency(include_directories : inc_dirs,
dependencies : [dependency('threads')],
compile_args : interface_compile_args)
install_subdir('quill/include/quill', install_dir: get_option('includedir'), install_tag: 'devel')
pkg_mod = import('pkgconfig')
pkg_mod.generate(
name : 'quill',
filebase : 'quill',
description : 'Asynchronous Low Latency C++ Logging Library',
subdirs : 'quill'
)
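# Consumption sketch (editorial, hypothetical project names): a parent Meson project would usually
# pull this in as a subproject and link against the declared dependency, e.g.
#   quill_proj = subproject('quill')
#   deps += quill_proj.get_variable('quill_dep')
#   executable('app', 'main.cpp', dependencies : deps)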

View file

@ -0,0 +1,248 @@
if (NOT QUILL_MASTER_PROJECT)
find_package(Threads REQUIRED)
endif ()
# library name
set(TARGET_NAME quill)
# header files
set(HEADER_FILES
include/quill/backend/BackendManager.h
include/quill/backend/BackendOptions.h
include/quill/backend/BackendWorker.h
include/quill/backend/BacktraceStorage.h
include/quill/backend/PatternFormatter.h
include/quill/backend/RdtscClock.h
include/quill/backend/SignalHandler.h
include/quill/backend/StringFromTime.h
include/quill/backend/TimestampFormatter.h
include/quill/backend/TransitEvent.h
include/quill/backend/TransitEventBuffer.h
include/quill/backend/BackendUtilities.h
include/quill/bundled/fmt/args.h
include/quill/bundled/fmt/chrono.h
include/quill/bundled/fmt/color.h
include/quill/bundled/fmt/compile.h
include/quill/bundled/fmt/core.h
include/quill/bundled/fmt/format.h
include/quill/bundled/fmt/format-inl.h
include/quill/bundled/fmt/os.h
include/quill/bundled/fmt/ostream.h
include/quill/bundled/fmt/printf.h
include/quill/bundled/fmt/ranges.h
include/quill/bundled/fmt/std.h
include/quill/bundled/fmt/xchar.h
include/quill/core/Attributes.h
include/quill/core/BoundedSPSCQueue.h
include/quill/core/Common.h
include/quill/core/DynamicFormatArgStore.h
include/quill/core/Codec.h
include/quill/core/Filesystem.h
include/quill/core/FormatBuffer.h
include/quill/core/FrontendOptions.h
include/quill/core/LoggerBase.h
include/quill/core/LoggerManager.h
include/quill/core/LogLevel.h
include/quill/core/MacroMetadata.h
include/quill/core/MathUtils.h
include/quill/core/QuillError.h
include/quill/core/Rdtsc.h
include/quill/core/SinkManager.h
include/quill/core/ThreadContextManager.h
include/quill/core/ThreadUtilities.h
include/quill/core/TimeUtilities.h
include/quill/core/UnboundedSPSCQueue.h
include/quill/filters/Filter.h
include/quill/sinks/ConsoleSink.h
include/quill/sinks/FileSink.h
include/quill/sinks/JsonConsoleSink.h
include/quill/sinks/JsonFileSink.h
include/quill/sinks/NullSink.h
include/quill/sinks/RotatingFileSink.h
include/quill/sinks/Sink.h
include/quill/sinks/StreamSink.h
include/quill/std/Array.h
include/quill/std/Deque.h
include/quill/std/FilesystemPath.h
include/quill/std/ForwardList.h
include/quill/std/List.h
include/quill/std/Map.h
include/quill/std/Optional.h
include/quill/std/Pair.h
include/quill/std/Set.h
include/quill/std/Tuple.h
include/quill/std/UnorderedMap.h
include/quill/std/UnorderedSet.h
include/quill/std/Vector.h
include/quill/Backend.h
include/quill/BackendTscClock.h
include/quill/Frontend.h
include/quill/Logger.h
include/quill/LogMacros.h
include/quill/UserClockSource.h
include/quill/Utility.h
)
# Add as a library
add_library(${TARGET_NAME} INTERFACE)
add_library(${TARGET_NAME}::${TARGET_NAME} ALIAS ${TARGET_NAME})
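# Note (editorial): quill v4 is header-only, which is why the target is declared INTERFACE; the
# header list above is attached via target_sources mainly so the files show up in IDE project
# generators rather than to compile anything.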
if (QUILL_NO_EXCEPTIONS)
target_compile_definitions(${TARGET_NAME} PUBLIC INTERFACE -DQUILL_NO_EXCEPTIONS)
if (NOT MSVC)
target_compile_options(${TARGET_NAME} PUBLIC INTERFACE -fno-exceptions)
endif ()
endif ()
if (QUILL_NO_THREAD_NAME_SUPPORT)
target_compile_definitions(${TARGET_NAME} PUBLIC INTERFACE -DQUILL_NO_THREAD_NAME_SUPPORT)
endif ()
if (QUILL_X86ARCH)
target_compile_definitions(${TARGET_NAME} PUBLIC INTERFACE -DQUILL_X86ARCH)
endif ()
if (QUILL_DISABLE_NON_PREFIXED_MACROS)
target_compile_definitions(${TARGET_NAME} PUBLIC INTERFACE -DQUILL_DISABLE_NON_PREFIXED_MACROS)
endif ()
# Add target sources
target_sources(${TARGET_NAME} PRIVATE ${HEADER_FILES})
# Link dependencies
target_link_libraries(${TARGET_NAME} PUBLIC INTERFACE Threads::Threads)
if (MINGW)
# strftime requires this when using MinGw to correctly format the time ..
target_link_libraries(${TARGET_NAME} PUBLIC INTERFACE ucrtbase)
endif ()
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
target_link_libraries(${TARGET_NAME} PUBLIC INTERFACE stdc++fs)
endif ()
# Add include directories for this library
target_include_directories(${TARGET_NAME}
INTERFACE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>)
# Properties
set_target_properties(${TARGET_NAME} PROPERTIES VERSION ${QUILL_VERSION} SOVERSION ${QUILL_VERSION})
# ---- Tests ---- #
if (QUILL_BUILD_TESTS AND EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/test)
add_subdirectory(test)
endif ()
if (QUILL_MASTER_PROJECT OR QUILL_ENABLE_INSTALL)
# ---- Install ---- #
include(GNUInstallDirs)
include(CMakePackageConfigHelpers)
set(version_config ${PROJECT_BINARY_DIR}/quill-config-version.cmake)
set(project_config ${PROJECT_BINARY_DIR}/quill-config.cmake)
set(pkgconfig ${PROJECT_BINARY_DIR}/quill.pc)
set(targets_export_name quill-targets)
set(QUILL_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/quill CACHE STRING
"Installation directory for cmake files, relative to ${CMAKE_INSTALL_PREFIX}.")
set(QUILL_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE STRING
"Installation directory for libraries, relative to ${CMAKE_INSTALL_PREFIX}.")
set(QUILL_INC_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE STRING
"Installation directory for include files, relative to ${CMAKE_INSTALL_PREFIX}.")
set(QUILL_PKGCONFIG_DIR ${CMAKE_INSTALL_PREFIX}/pkgconfig CACHE PATH
"Installation directory for pkgconfig (.pc) files, relative to ${CMAKE_INSTALL_PREFIX}.")
# Generate pkgconfig
configure_file(
"${CMAKE_CURRENT_LIST_DIR}/cmake/quill.pc.in"
"${pkgconfig}"
@ONLY)
# Copy pkgconfig
install(FILES "${pkgconfig}" DESTINATION "${QUILL_PKGCONFIG_DIR}")
# Generate the version, config and target files into the build directory.
write_basic_package_version_file(
${version_config}
VERSION ${QUILL_VERSION}
COMPATIBILITY AnyNewerVersion)
configure_package_config_file(
${CMAKE_CURRENT_LIST_DIR}/cmake/quill-config.cmake.in
${project_config}
INSTALL_DESTINATION ${QUILL_CMAKE_DIR})
# Install version, config files
install(FILES ${project_config} ${version_config}
DESTINATION ${QUILL_CMAKE_DIR})
# Install the headers
install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/include/quill DESTINATION ${QUILL_INC_DIR})
# Install the library
install(TARGETS ${TARGET_NAME} EXPORT ${targets_export_name}
LIBRARY DESTINATION ${QUILL_LIB_DIR}
ARCHIVE DESTINATION ${QUILL_LIB_DIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
# Export the library
install(EXPORT ${targets_export_name} DESTINATION ${QUILL_CMAKE_DIR}
NAMESPACE quill::)
# Install the examples
if (QUILL_BUILD_EXAMPLES)
install(TARGETS
quill_example_use_overwrite_macros
quill_example_recommended_usage
quill_example_backend_thread_notify
quill_example_backtrace_logging
quill_example_bounded_dropping_queue_frontend
quill_example_console_logging
quill_example_custom_console_colours
quill_example_rotating_file_logging
quill_example_signal_handler
quill_example_logger_removal_with_file_event_notifier
quill_example_custom_frontend_options
quill_example_file_logging
quill_example_filter_logging
quill_example_system_clock_logging
quill_example_user_clock_source
quill_example_user_defined_filter
quill_example_user_defined_sink
quill_example_tags_logging
quill_example_json_sink_logging
quill_example_user_defined_types_logging
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
endif ()
# ---- Packaging ---- #
set(CPACK_GENERATOR ZIP)
set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0)
set(CPACK_INSTALL_CMAKE_PROJECTS "${CMAKE_BINARY_DIR}" "${PROJECT_NAME}" ALL .)
set(CPACK_PROJECT_URL "https://github.com/odygrd/quill")
set(CPACK_PACKAGE_VENDOR "Odysseas Georgoudis")
set(CPACK_PACKAGE_CONTACT "Odysseas Georgoudis <odygrd@hotmail.com>")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Asynchronous Low Latency C++ Logging Library")
set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH})
set(CPACK_PACKAGE_RELOCATABLE ON)
set(CPACK_RPM_PACKAGE_LICENSE "MIT")
set(CPACK_RPM_PACKAGE_GROUP "System Environment/Libraries")
set(CPACK_RPM_PACKAGE_URL ${CPACK_PROJECT_URL})
set(CPACK_RPM_PACKAGE_DESCRIPTION "Asynchronous Low Latency C++ Logging Library")
include(CPack)
endif ()

View file

@ -0,0 +1,157 @@
# 2012-01-31, Lars Bilke
# - Enable Code Coverage
#
# 2013-09-17, Joakim Söderberg
# - Added support for Clang.
# - Some additional usage instructions.
#
# 2018-03-31, Bendik Samseth
# - Relax debug output.
# - Keep a copy of the coverage output for later use.
# - Updated coverage exclude patterns.
#
# 2018-01-03, HenryRLee
# - Allow for *Clang compiler names, not just Clang.
#
# 2018-01-03, Bendik Samseth
# - Only check compiler compatibility if in a coverage build.
#
#
# USAGE:
# 0. (Mac only) If you use Xcode 5.1 make sure to patch geninfo as described here:
# http://stackoverflow.com/a/22404544/80480
#
# 1. Copy this file into your cmake modules path.
#
# 2. Add the following line to your CMakeLists.txt:
# INCLUDE(CodeCoverage)
#
# 3. Set compiler flags to turn off optimization and enable coverage:
# SET(CMAKE_CXX_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage")
# SET(CMAKE_C_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage")
#
# 4. Use the function SETUP_TARGET_FOR_COVERAGE to create a custom make target
# which runs your test executable and produces a lcov code coverage report:
# Example:
# SETUP_TARGET_FOR_COVERAGE(
# my_coverage_target # Name for custom target.
# test_driver # Name of the test driver executable that runs the tests.
# # NOTE! This should always have a ZERO as exit code
# # otherwise the coverage generation will not complete.
# coverage # Name of output directory.
# )
#
# 5. Build a Debug build:
# cmake -DCMAKE_BUILD_TYPE=Debug ..
# make
# make my_coverage_target
#
#
# Param _targetname The name of the new custom make target
# Param _testrunner The name of the target which runs the tests.
# MUST return ZERO always, even on errors.
# If not, no coverage report will be created!
# Param _outputname lcov output is generated as _outputname.info
# HTML report is generated in _outputname/index.html
# Optional fourth parameter is passed as arguments to _testrunner
# Pass them in list form, e.g.: "-j;2" for -j 2
FUNCTION(SETUP_TARGET_FOR_COVERAGE _targetname _testrunner _outputname)
IF (NOT LCOV_PATH)
MESSAGE(FATAL_ERROR "lcov not found! Aborting...")
ENDIF () # NOT LCOV_PATH
IF (NOT GENHTML_PATH)
MESSAGE(FATAL_ERROR "genhtml not found! Aborting...")
ENDIF () # NOT GENHTML_PATH
# Setup target
ADD_CUSTOM_TARGET(${_targetname}
# Cleanup lcov
${LCOV_PATH} --directory . --zerocounters
# Run tests
COMMAND ${_testrunner} ${ARGV3}
# Capturing lcov counters and generating report
COMMAND ${LCOV_PATH} --directory . --capture --output-file ${_outputname}.info
COMMAND ${LCOV_PATH} --remove ${_outputname}.info '*/tests/*' '/usr/*' '*/external/*' '/Applications/*' --output-file ${_outputname}.info.cleaned
COMMAND ${GENHTML_PATH} -o ${_outputname} ${_outputname}.info.cleaned
COMMAND ${LCOV_PATH} --list ${_outputname}.info.cleaned
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report."
)
# Show info where to find the report
ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD
COMMAND ;
COMMENT "${BoldMagenta}Open ./${_outputname}/index.html in your browser to view the coverage report.${ColourReset}"
)
ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE
string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_tolower)
if (cmake_build_type_tolower STREQUAL "coverage")
# Check prereqs
FIND_PROGRAM(GCOV_PATH gcov)
FIND_PROGRAM(LCOV_PATH lcov)
FIND_PROGRAM(GENHTML_PATH genhtml)
FIND_PROGRAM(GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/tests)
IF (NOT GCOV_PATH)
MESSAGE(FATAL_ERROR "gcov not found! Aborting...")
ENDIF () # NOT GCOV_PATH
IF (NOT CMAKE_COMPILER_IS_GNUCXX)
IF (NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang")
MESSAGE(FATAL_ERROR "Compiler is not GNU gcc or Clang! Aborting...")
ENDIF ()
ENDIF () # NOT CMAKE_COMPILER_IS_GNUCXX
SET(CMAKE_CXX_FLAGS_COVERAGE
"-g -O0 -fprofile-arcs -ftest-coverage"
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE)
SET(CMAKE_C_FLAGS_COVERAGE
"-g -O0 -fprofile-arcs -ftest-coverage"
CACHE STRING "Flags used by the C compiler during coverage builds."
FORCE)
SET(CMAKE_EXE_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE)
SET(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE)
MARK_AS_ADVANCED(
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_C_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE)
# If unwanted files are included in the coverage reports, you can
# adjust the exclude patterns on line 83.
SETUP_TARGET_FOR_COVERAGE(
coverage # Name for custom target.
${TEST_NAME} # Name of the test driver executable that runs the tests.
# NOTE! This should always have a ZERO as exit code
# otherwise the coverage generation will not complete.
coverage_out # Name of output directory.
)
else ()
add_custom_target(coverage
COMMAND echo "${Red}Code coverage only available in coverage builds."
COMMAND echo "${Green}Make a new build directory and rerun cmake with -DCMAKE_BUILD_TYPE=Coverage to enable this target.${ColorReset}"
)
endif ()

Some files were not shown because too many files have changed in this diff.