// Copyright(c) 2015-present, Gabi Melman & spdlog contributors.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)

#pragma once

#ifndef SPDLOG_HEADER_ONLY
#    include <spdlog/details/thread_pool.h>
#endif

#include <spdlog/common.h>
#include <cassert>

namespace spdlog {
namespace details {

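// A thread_pool is usually created indirectly via spdlog::init_thread_pool(),
// but it can also be constructed directly and shared between async loggers.
// Rough usage sketch (sink type, queue size and thread count are illustrative;
// needs <spdlog/async.h> and <spdlog/sinks/stdout_color_sinks.h>):
//
//     auto tp = std::make_shared<spdlog::details::thread_pool>(8192, 1);
//     auto logger = std::make_shared<spdlog::async_logger>("async_logger",
//         std::make_shared<spdlog::sinks::stdout_color_sink_mt>(), tp,
//         spdlog::async_overflow_policy::block);

// create the message queue with room for q_max_items entries and spawn
// threads_n worker threads, each running worker_loop_() between the
// on_thread_start/on_thread_stop callbacks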
SPDLOG_INLINE thread_pool::thread_pool(
    size_t q_max_items, size_t threads_n, std::function<void()> on_thread_start, std::function<void()> on_thread_stop)
    : q_(q_max_items)
{
    if (threads_n == 0 || threads_n > 1000)
    {
        throw_spdlog_ex("spdlog::thread_pool(): invalid threads_n param (valid "
                        "range is 1-1000)");
    }
    for (size_t i = 0; i < threads_n; i++)
    {
        threads_.emplace_back([this, on_thread_start, on_thread_stop] {
            on_thread_start();
            this->thread_pool::worker_loop_();
            on_thread_stop();
        });
    }
}

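// delegating constructors: default the unused on_thread_start/on_thread_stop
// callbacks to no-ops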
SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items, size_t threads_n, std::function<void()> on_thread_start)
    : thread_pool(q_max_items, threads_n, on_thread_start, [] {})
{}

SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items, size_t threads_n)
    : thread_pool(
          q_max_items, threads_n, [] {}, [] {})
{}

// message all threads to terminate gracefully and join them
SPDLOG_INLINE thread_pool::~thread_pool()
{
    SPDLOG_TRY
    {
        for (size_t i = 0; i < threads_.size(); i++)
        {
            post_async_msg_(async_msg(async_msg_type::terminate), async_overflow_policy::block);
        }

        for (auto &t : threads_)
        {
            t.join();
        }
    }
    SPDLOG_CATCH_STD
}

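// enqueue a log message to be written by a worker thread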
void SPDLOG_INLINE thread_pool::post_log(async_logger_ptr &&worker_ptr, const details::log_msg &msg, async_overflow_policy overflow_policy)
{
    async_msg async_m(std::move(worker_ptr), async_msg_type::log, msg);
    post_async_msg_(std::move(async_m), overflow_policy);
}

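// enqueue a flush request to be handled by a worker thread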
void SPDLOG_INLINE thread_pool::post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy)
{
    post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush), overflow_policy);
}

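// number of messages overrun (discarded) so far because the queue was full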
size_t SPDLOG_INLINE thread_pool::overrun_counter()
{
    return q_.overrun_counter();
}

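// reset the overrun (discarded messages) counter to zero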
void SPDLOG_INLINE thread_pool::reset_overrun_counter()
{
    q_.reset_overrun_counter();
}

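// current number of messages waiting in the queue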
size_t SPDLOG_INLINE thread_pool::queue_size()
{
    return q_.size();
}

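// push a message to the queue: block until there is room, or overwrite the
// oldest queued message, depending on the overflow policy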
void SPDLOG_INLINE thread_pool::post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy)
{
    if (overflow_policy == async_overflow_policy::block)
    {
        q_.enqueue(std::move(new_msg));
    }
    else
    {
        q_.enqueue_nowait(std::move(new_msg));
    }
}

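// worker thread loop: process messages until a terminate message is dequeued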
void SPDLOG_INLINE thread_pool::worker_loop_()
{
    while (process_next_msg_()) {}
}

// process next message in the queue
// return true if this thread should still be active (i.e. no terminate message
// was received)
bool SPDLOG_INLINE thread_pool::process_next_msg_()
{
    async_msg incoming_async_msg;
    bool dequeued = q_.dequeue_for(incoming_async_msg, std::chrono::seconds(10));
    if (!dequeued)
    {
        return true;
    }

    switch (incoming_async_msg.msg_type)
    {
    case async_msg_type::log: {
        incoming_async_msg.worker_ptr->backend_sink_it_(incoming_async_msg);
        return true;
    }
    case async_msg_type::flush: {
        incoming_async_msg.worker_ptr->backend_flush_();
        return true;
    }

    case async_msg_type::terminate: {
        return false;
    }

    default: {
        assert(false);
    }
    }

    return true;
}

} // namespace details
} // namespace spdlog