mirror of https://github.com/Icinga/icinga2.git
Make the event queue adaptive.
This commit is contained in:
parent 27ceabf3c0
commit d04a04d897

@@ -29,23 +29,13 @@
 using namespace icinga;
 
 EventQueue::EventQueue(void)
-    : m_Stopped(false)
+    : m_Stopped(false), m_ThreadDeaths(0), m_Latency(0), m_LatencyCount(0)
 {
-    m_ThreadCount = boost::thread::hardware_concurrency();
-
-    if (m_ThreadCount == 0)
-        m_ThreadCount = 1;
-
-    m_ThreadCount *= 8;
-
-    m_ThreadCount = 128;
-
-    m_States = new ThreadState[m_ThreadCount];
-
-    for (int i = 0; i < m_ThreadCount; i++) {
-        m_States[i] = ThreadIdle;
-        m_Threads.create_thread(boost::bind(&EventQueue::QueueThreadProc, this, i));
-    }
-
+    for (int i = 0; i < sizeof(m_ThreadStates) / sizeof(m_ThreadStates[0]); i++)
+        m_ThreadStates[i] = ThreadDead;
+
+    for (int i = 0; i < 8; i++)
+        SpawnWorker();
+
     boost::thread reportThread(boost::bind(&EventQueue::ReportThreadProc, this));
     reportThread.detach();
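
With this change the constructor no longer derives a fixed pool size from boost::thread::hardware_concurrency(); instead a fixed-capacity slot table records each worker's state (ThreadDead marks a free slot) and eight workers are spawned into free slots at startup. A minimal standalone sketch of that slot-table idea, using standard-library types and hypothetical names rather than the project's members, might look like this:

#include <cstddef>
#include <iostream>

enum ThreadState { ThreadDead, ThreadIdle, ThreadBusy };

// Fixed-capacity table of worker slots; ThreadDead marks a free slot.
const std::size_t MaxWorkers = 512;
ThreadState g_Slots[MaxWorkers];

// Claims the first free slot and returns its index, or -1 if the table is full.
int ClaimSlot()
{
    for (std::size_t i = 0; i < MaxWorkers; i++) {
        if (g_Slots[i] == ThreadDead) {
            g_Slots[i] = ThreadIdle;   // reserve the slot before starting a thread
            return static_cast<int>(i);
        }
    }

    return -1;
}

int main()
{
    // Mark every slot free, then claim eight of them, as the constructor above does.
    for (std::size_t i = 0; i < MaxWorkers; i++)
        g_Slots[i] = ThreadDead;

    for (int i = 0; i < 8; i++)
        std::cout << "claimed slot " << ClaimSlot() << "\n";

    return 0;
}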
@@ -69,7 +59,13 @@ void EventQueue::Stop(void)
  */
 void EventQueue::Join(void)
 {
-    m_Threads.join_all();
+    boost::mutex::scoped_lock lock(m_Mutex);
+
+    while (!m_Stopped || !m_Events.empty()) {
+        lock.unlock();
+        Utility::Sleep(0.5);
+        lock.lock();
+    }
 }
 
 /**
@@ -83,18 +79,26 @@ void EventQueue::QueueThreadProc(int tid)
         {
             boost::mutex::scoped_lock lock(m_Mutex);
 
-            m_States[tid] = ThreadIdle;
+            m_ThreadStates[tid] = ThreadIdle;
 
-            while (m_Events.empty() && !m_Stopped)
+            while (m_Events.empty() && !m_Stopped && m_ThreadDeaths == 0)
                 m_CV.wait(lock);
 
+            if (m_ThreadDeaths > 0) {
+                m_ThreadDeaths--;
+                break;
+            }
+
             if (m_Events.empty() && m_Stopped)
                 break;
 
             event = m_Events.front();
             m_Events.pop_front();
 
-            m_States[tid] = ThreadBusy;
+            m_ThreadStates[tid] = ThreadBusy;
+
+            m_Latency += Utility::GetTime() - event.Timestamp;
+            m_LatencyCount++;
         }
 
 #ifdef _DEBUG
@@ -150,6 +154,8 @@ void EventQueue::QueueThreadProc(int tid)
         }
 #endif /* _DEBUG */
     }
+
+    m_ThreadStates[tid] = ThreadDead;
 }
 
 /**
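
The m_ThreadDeaths counter gives the pool a cooperative way to shrink: KillWorker() merely increments it, and whichever idle worker wakes up next decrements the counter and exits, marking its slot ThreadDead on the way out. A self-contained sketch of that wake-and-retire pattern, using std::thread instead of Boost and hypothetical global names, could look like this:

#include <condition_variable>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex g_Mutex;
std::condition_variable g_CV;
std::deque<std::function<void()> > g_Events;
int g_PendingDeaths = 0;   // incremented by the controller, consumed by workers
bool g_Stopped = false;

void WorkerProc(int id)
{
    for (;;) {
        std::function<void()> event;

        {
            std::unique_lock<std::mutex> lock(g_Mutex);

            // Wake on new work, shutdown, or a pending "death" request.
            g_CV.wait(lock, [] {
                return !g_Events.empty() || g_Stopped || g_PendingDeaths > 0;
            });

            if (g_PendingDeaths > 0) {
                g_PendingDeaths--;     // consume one death request and retire
                std::cout << "worker " << id << " retiring\n";
                return;
            }

            if (g_Events.empty() && g_Stopped)
                return;

            event = g_Events.front();
            g_Events.pop_front();
        }

        event();                       // run the task outside the lock
    }
}

int main()
{
    std::thread w1(WorkerProc, 1), w2(WorkerProc, 2);

    {
        std::lock_guard<std::mutex> lock(g_Mutex);
        g_Events.push_back([] { std::cout << "task done\n"; });
        g_PendingDeaths++;             // ask one (unspecified) worker to exit
    }
    g_CV.notify_all();

    {
        std::lock_guard<std::mutex> lock(g_Mutex);
        g_Stopped = true;              // shut down whoever remains
    }
    g_CV.notify_all();

    w1.join();
    w2.join();
    return 0;
}

As in the patch, the death check comes before the work check, so a kill request is honoured even while tasks are still queued.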
@@ -161,6 +167,9 @@ void EventQueue::Post(const EventQueueCallback& callback)
 {
     boost::mutex::scoped_lock lock(m_Mutex);
 
+    if (m_Stopped)
+        BOOST_THROW_EXCEPTION(std::runtime_error("EventQueue has been stopped."));
+
     EventQueueWorkItem event;
     event.Callback = callback;
     event.Timestamp = Utility::GetTime();
@@ -171,43 +180,79 @@ void EventQueue::Post(const EventQueueCallback& callback)
 
 void EventQueue::ReportThreadProc(void)
 {
+    double last_adjustment = 0;
+
     for (;;) {
         Utility::Sleep(5);
 
         double now = Utility::GetTime();
 
-        int pending, busy;
-        double max_latency, avg_latency;
+        int pending, alive, busy;
+        double avg_latency;
 
         {
             boost::mutex::scoped_lock lock(m_Mutex);
             pending = m_Events.size();
 
+            alive = 0;
             busy = 0;
 
-            for (int i = 0; i < m_ThreadCount; i++) {
-                if (m_States[i] == ThreadBusy)
+            for (int i = 0; i < sizeof(m_ThreadStates) / sizeof(m_ThreadStates[0]); i++) {
+                if (m_ThreadStates[i] != ThreadDead)
+                    alive++;
+
+                if (m_ThreadStates[i] == ThreadBusy)
                     busy++;
             }
 
-            max_latency = 0;
-            avg_latency = 0;
-
-            BOOST_FOREACH(const EventQueueWorkItem& event, m_Events) {
-                double latency = now - event.Timestamp;
-
-                avg_latency += latency;
-
-                if (latency > max_latency)
-                    max_latency = latency;
-            }
-
-            avg_latency /= pending;
+            if (m_LatencyCount > 0)
+                avg_latency = m_Latency / (m_LatencyCount * 1.0);
+            else
+                avg_latency = 0;
+
+            m_Latency = 0;
+            m_LatencyCount = 0;
+
+            if (pending > 0) {
+                /* Spawn a few additional workers. */
+                for (int i = 0; i < 2; i++)
+                    SpawnWorker();
+            } else if (last_adjustment < now - 30) {
+                KillWorker();
+                last_adjustment = now;
+            }
         }
 
-        Log(LogInformation, "base", "Pending tasks: " + Convert::ToString(pending) + "; Busy threads: " +
-            Convert::ToString(busy) + "; Idle threads: " + Convert::ToString(m_ThreadCount - busy) +
-            "; Maximum latency: " + Convert::ToString((long)max_latency * 1000) + "ms"
-            "; Average latency: " + Convert::ToString((long)avg_latency * 1000) + "ms");
+        std::ostringstream msgbuf;
+        msgbuf << "Pending tasks: " << pending << "; Busy threads: " << busy << "; Idle threads: " << alive - busy << "; Average latency: " << (long)(avg_latency * 1000) << "ms";
+        Log(LogInformation, "base", msgbuf.str());
     }
 }
+
+/**
+ * Note: Caller must hold m_Mutex
+ */
+void EventQueue::SpawnWorker(void)
+{
+    for (int i = 0; i < sizeof(m_ThreadStates) / sizeof(m_ThreadStates[0]); i++) {
+        if (m_ThreadStates[i] == ThreadDead) {
+            Log(LogInformation, "debug", "Spawning worker thread.");
+
+            m_ThreadStates[i] = ThreadIdle;
+            boost::thread worker(boost::bind(&EventQueue::QueueThreadProc, this, i));
+            worker.detach();
+
+            break;
+        }
+    }
+}
+
+/**
+ * Note: Caller must hold m_Mutex.
+ */
+void EventQueue::KillWorker(void)
+{
+    Log(LogInformation, "base", "Killing worker thread.");
+
+    m_ThreadDeaths++;
+}
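
The reporting thread now doubles as the controller: every five seconds it samples the backlog and the per-slot thread states, averages the latency sum accumulated by the workers (then resets the accumulator), spawns two additional workers whenever tasks are still pending, and retires a single worker if the queue is empty and no adjustment has happened for 30 seconds. A condensed sketch of just that decision logic, with hypothetical stand-in types instead of the queue's real members, could read:

#include <iostream>

// Hypothetical snapshot of the queue's state at report time.
struct Snapshot {
    int Pending;        // tasks still waiting in the queue
    double LatencySum;  // queue latency accumulated since the last report
    int LatencyCount;   // number of tasks that latency was measured over
};

enum Adjustment { AdjustNone, AdjustSpawnTwo, AdjustKillOne };

// Mirrors the policy above: grow quickly under backlog, shrink at most
// one worker per 30 seconds once the queue stays empty.
Adjustment Decide(const Snapshot& s, double now, double& lastAdjustment)
{
    if (s.Pending > 0)
        return AdjustSpawnTwo;

    if (lastAdjustment < now - 30) {
        lastAdjustment = now;
        return AdjustKillOne;
    }

    return AdjustNone;
}

double AverageLatency(const Snapshot& s)
{
    return s.LatencyCount > 0 ? s.LatencySum / s.LatencyCount : 0.0;
}

int main()
{
    double lastAdjustment = 0;

    Snapshot busy = { 5, 2.5, 10 };    // backlog at t=100: grow by two workers
    std::cout << "spawn? " << (Decide(busy, 100, lastAdjustment) == AdjustSpawnTwo)
              << ", avg latency " << AverageLatency(busy) * 1000 << "ms\n";

    Snapshot idle = { 0, 0.0, 0 };     // empty queue at t=200, >30s since last change
    std::cout << "kill? " << (Decide(idle, 200, lastAdjustment) == AdjustKillOne)
              << ", avg latency " << AverageLatency(idle) * 1000 << "ms\n";

    return 0;
}

Note that, as in the patch, lastAdjustment is only updated when a worker is retired, so a burst of backlog does not delay the next shrink decision.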
@@ -32,6 +32,7 @@ namespace icinga
 
 enum ThreadState
 {
+    ThreadDead,
     ThreadIdle,
     ThreadBusy
 };
@@ -61,9 +62,11 @@ public:
     void Post(const EventQueueCallback& callback);
 
 private:
-    boost::thread_group m_Threads;
-    ThreadState *m_States;
-    int m_ThreadCount;
+    ThreadState m_ThreadStates[512];
+    int m_ThreadDeaths;
+
+    double m_Latency;
+    int m_LatencyCount;
 
     boost::mutex m_Mutex;
     boost::condition_variable m_CV;
@@ -73,6 +76,9 @@ private:
 
     void QueueThreadProc(int tid);
     void ReportThreadProc(void);
+
+    void SpawnWorker(void);
+    void KillWorker(void);
 };
 
 }