icinga2/lib/base/threadpool.cpp

389 lines
10 KiB
C++
Raw Normal View History

2012-06-24 02:56:48 +02:00
/******************************************************************************
* Icinga 2 *
* Copyright (C) 2012-2014 Icinga Development Team (http://www.icinga.org) *
2012-06-24 02:56:48 +02:00
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of the GNU General Public License *
* as published by the Free Software Foundation; either version 2 *
* of the License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the Free Software Foundation *
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. *
******************************************************************************/
2014-05-25 16:23:35 +02:00
#include "base/threadpool.hpp"
2014-10-19 14:21:12 +02:00
#include "base/logger.hpp"
2014-05-25 16:23:35 +02:00
#include "base/debug.hpp"
#include "base/utility.hpp"
#include "base/exception.hpp"
#include "base/application.hpp"
2013-03-15 18:21:29 +01:00
#include <boost/bind.hpp>
#include <iostream>
2012-06-24 02:56:48 +02:00
using namespace icinga;

/* Monotonically increasing ID; each pool takes the next value so log
 * messages can distinguish pools. Not thread-safe by itself — assumes
 * pools are constructed from a single thread. TODO confirm. */
int ThreadPool::m_NextID = 1;
/**
 * Constructor for the ThreadPool class.
 *
 * @param max_threads The maximum number of worker threads (UINT_MAX means no limit).
 */
ThreadPool::ThreadPool(size_t max_threads)
	: m_ID(m_NextID++), m_MaxThreads(max_threads), m_Stopped(false)
{
	const size_t queueCount = sizeof(m_Queues) / sizeof(m_Queues[0]);

	/* We need at least one worker thread per queue. */
	if (m_MaxThreads != UINT_MAX && m_MaxThreads < queueCount)
		m_MaxThreads = queueCount;

	Start();
}
2012-07-13 23:33:30 +02:00
2013-03-25 18:36:15 +01:00
ThreadPool::~ThreadPool(void)
{
	/* Shut down the manager thread and all workers before destruction. */
	Stop();
}
void ThreadPool::Start(void)
{
for (size_t i = 0; i < sizeof(m_Queues) / sizeof(m_Queues[0]); i++)
m_Queues[i].SpawnWorker(m_ThreadGroup);
m_MgmtThread = boost::thread(boost::bind(&ThreadPool::ManagerThreadProc, this));
2013-02-15 06:47:26 +01:00
}
2013-03-25 18:36:15 +01:00
void ThreadPool::Stop(void)
2013-02-15 06:47:26 +01:00
{
2014-11-13 10:00:49 +01:00
{
boost::mutex::scoped_lock lock(m_MgmtMutex);
m_Stopped = true;
m_MgmtCV.notify_all();
}
2014-11-13 10:00:49 +01:00
m_MgmtThread.join();
2013-04-19 12:58:16 +02:00
for (size_t i = 0; i < sizeof(m_Queues) / sizeof(m_Queues[0]); i++) {
boost::mutex::scoped_lock lock(m_Queues[i].Mutex);
2014-11-13 10:00:49 +01:00
m_Queues[i].Stopped = true;
m_Queues[i].CV.notify_all();
}
2014-11-13 10:00:49 +01:00
m_ThreadGroup.join_all();
m_Stopped = false;
2013-02-17 19:14:34 +01:00
}
2012-06-24 02:56:48 +02:00
2013-02-17 19:14:34 +01:00
/**
 * Waits for work items and processes them in FIFO order until the queue
 * is stopped (and drained) or this worker is marked as a zombie.
 *
 * @param queue The queue this worker thread services.
 */
void ThreadPool::WorkerThread::ThreadProc(Queue& queue)
{
	std::ostringstream idbuf;
	idbuf << "Q #" << &queue << " W #" << this;
	Utility::SetThreadName(idbuf.str());

	for (;;) {
		WorkItem wi;

		{
			boost::mutex::scoped_lock lock(queue.Mutex);

			UpdateUtilization(ThreadIdle);

			while (queue.Items.empty() && !queue.Stopped && !Zombie) {
				/* The queue is drained; wake anyone waiting for starvation. */
				queue.CVStarved.notify_all();

				queue.CV.wait(lock);
			}

			/* The manager thread asked this worker to terminate. */
			if (Zombie)
				break;

			/* Shutdown was requested and there is no work left. */
			if (queue.Items.empty() && queue.Stopped)
				break;

			wi = queue.Items.front();
			queue.Items.pop_front();

			UpdateUtilization(ThreadBusy);
		}

		double st = Utility::GetTime();

#ifdef _DEBUG
#	ifdef RUSAGE_THREAD
		struct rusage usage_start, usage_end;

		(void) getrusage(RUSAGE_THREAD, &usage_start);
#	endif /* RUSAGE_THREAD */
#endif /* _DEBUG */

		/* Exceptions must not escape a worker thread; log and continue. */
		try {
			if (wi.Callback)
				wi.Callback();
		} catch (const std::exception& ex) {
			Log(LogCritical, "ThreadPool")
			    << "Exception thrown in event handler:\n"
			    << DiagnosticInformation(ex);
		} catch (...) {
			Log(LogCritical, "ThreadPool", "Exception of unknown type thrown in event handler.");
		}

		double et = Utility::GetTime();
		double latency = st - wi.Timestamp;

		/* Accumulate per-interval statistics for the manager thread. */
		{
			boost::mutex::scoped_lock lock(queue.Mutex);

			queue.WaitTime += latency;
			queue.ServiceTime += et - st;
			queue.TaskCount++;
		}

#ifdef _DEBUG
#	ifdef RUSAGE_THREAD
		(void) getrusage(RUSAGE_THREAD, &usage_end);

		double duser = (usage_end.ru_utime.tv_sec - usage_start.ru_utime.tv_sec) +
		    (usage_end.ru_utime.tv_usec - usage_start.ru_utime.tv_usec) / 1000000.0;

		double dsys = (usage_end.ru_stime.tv_sec - usage_start.ru_stime.tv_sec) +
		    (usage_end.ru_stime.tv_usec - usage_start.ru_stime.tv_usec) / 1000000.0;

		double dwait = (et - st) - (duser + dsys);

		int dminfaults = usage_end.ru_minflt - usage_start.ru_minflt;
		int dmajfaults = usage_end.ru_majflt - usage_start.ru_majflt;

		int dvctx = usage_end.ru_nvcsw - usage_start.ru_nvcsw;
		int divctx = usage_end.ru_nivcsw - usage_start.ru_nivcsw;
#	endif /* RUSAGE_THREAD */

		/* Warn about handlers that block a worker for too long. */
		if (et - st > 0.5) {
			Log(LogWarning, "ThreadPool")
#	ifdef RUSAGE_THREAD
			    << "Event call took user:" << duser << "s, system:" << dsys << "s, wait:" << dwait << "s, minor_faults:" << dminfaults << ", major_faults:" << dmajfaults << ", voluntary_csw:" << dvctx << ", involuntary_csw:" << divctx;
#	else
			    << "Event call took " << (et - st) << "s";
#	endif /* RUSAGE_THREAD */
		}
#endif /* _DEBUG */
	}

	boost::mutex::scoped_lock lock(queue.Mutex);
	UpdateUtilization(ThreadDead);
	Zombie = false;
}
2012-09-14 14:41:17 +02:00
/**
 * Appends a work item to the work queue. Work items will be processed in FIFO order.
 *
 * @param callback The callback function for the work item.
 * @param policy The scheduling policy
 * @returns true if the item was queued, false otherwise.
 */
bool ThreadPool::Post(const ThreadPool::WorkFunction& callback, SchedulerPolicy policy)
{
	WorkItem item;
	item.Callback = callback;
	item.Timestamp = Utility::GetTime();

	/* Pick a random queue to spread load across all of them. */
	size_t index = Utility::Random() % (sizeof(m_Queues) / sizeof(m_Queues[0]));
	Queue& queue = m_Queues[index];

	{
		boost::mutex::scoped_lock lock(queue.Mutex);

		/* Items posted after shutdown are rejected. */
		if (queue.Stopped)
			return false;

		/* Low-latency items get an extra worker so they don't have to wait. */
		if (policy == LowLatencyScheduler)
			queue.SpawnWorker(m_ThreadGroup);

		queue.Items.push_back(item);
		queue.CV.notify_one();
	}

	return true;
}
2013-02-20 19:52:25 +01:00
2013-03-25 18:36:15 +01:00
void ThreadPool::ManagerThreadProc(void)
2013-02-26 10:13:54 +01:00
{
2013-03-25 18:36:15 +01:00
std::ostringstream idbuf;
idbuf << "TP #" << m_ID << " Manager";
2013-03-25 18:36:15 +01:00
Utility::SetThreadName(idbuf.str());
double lastStats = 0;
2013-03-06 11:03:50 +01:00
for (;;) {
size_t total_pending = 0, total_alive = 0;
double total_avg_latency = 0;
double total_utilization = 0;
2013-02-26 10:13:54 +01:00
2013-03-06 11:03:50 +01:00
{
boost::mutex::scoped_lock lock(m_MgmtMutex);
2013-04-19 12:58:16 +02:00
if (!m_Stopped)
m_MgmtCV.timed_wait(lock, boost::posix_time::milliseconds(500));
2013-04-19 12:58:16 +02:00
if (m_Stopped)
break;
}
2013-04-19 12:58:16 +02:00
for (size_t i = 0; i < sizeof(m_Queues) / sizeof(m_Queues[0]); i++) {
size_t pending, alive = 0;
double avg_latency;
double utilization = 0;
Queue& queue = m_Queues[i];
boost::mutex::scoped_lock lock(queue.Mutex);
for (size_t i = 0; i < sizeof(queue.Threads) / sizeof(queue.Threads[0]); i++)
queue.Threads[i].UpdateUtilization();
pending = queue.Items.size();
for (size_t i = 0; i < sizeof(queue.Threads) / sizeof(queue.Threads[0]); i++) {
if (queue.Threads[i].State != ThreadDead && !queue.Threads[i].Zombie) {
2013-03-23 12:23:13 +01:00
alive++;
utilization += queue.Threads[i].Utilization * 100;
}
}
2013-03-25 18:36:15 +01:00
utilization /= alive;
if (queue.TaskCount > 0)
avg_latency = queue.WaitTime / (queue.TaskCount * 1.0);
2013-03-23 12:23:13 +01:00
else
avg_latency = 0;
2013-08-29 15:48:47 +02:00
if (utilization < 60 || utilization > 80 || alive < 8) {
double wthreads = std::ceil((utilization * alive) / 80.0);
2013-08-27 15:57:00 +02:00
int tthreads = wthreads - alive;
2013-03-23 20:38:41 +01:00
/* Make sure there is at least one thread per CPU */
int ncput = std::max(static_cast<unsigned int>(Application::GetConcurrency()) / QUEUECOUNT, 4U);
if (alive + tthreads < ncput)
tthreads = ncput - alive;
/* Don't kill more than 8 threads at once. */
if (tthreads < -8)
tthreads = -8;
2013-03-25 18:36:15 +01:00
/* Spawn more workers if there are outstanding work items. */
if (tthreads > 0 && pending > 0)
tthreads = 8;
if (m_MaxThreads != UINT_MAX && (alive + tthreads) * (sizeof(m_Queues) / sizeof(m_Queues[0])) > m_MaxThreads)
tthreads = m_MaxThreads / (sizeof(m_Queues) / sizeof(m_Queues[0])) - alive;
if (tthreads != 0) {
2014-10-19 17:52:17 +02:00
Log(LogNotice, "ThreadPool")
<< "Thread pool; current: " << alive << "; adjustment: " << tthreads;
}
2013-08-27 15:57:00 +02:00
for (int i = 0; i < -tthreads; i++)
queue.KillWorker(m_ThreadGroup);
for (int i = 0; i < tthreads; i++)
queue.SpawnWorker(m_ThreadGroup);
2013-03-23 12:23:13 +01:00
}
queue.WaitTime = 0;
queue.ServiceTime = 0;
queue.TaskCount = 0;
total_pending += pending;
total_alive += alive;
total_avg_latency += avg_latency;
total_utilization += utilization;
2013-03-23 12:23:13 +01:00
}
double now = Utility::GetTime();
if (lastStats < now - 15) {
lastStats = now;
2014-10-19 17:52:17 +02:00
Log(LogNotice, "ThreadPool")
<< "Pool #" << m_ID << ": Pending tasks: " << total_pending << "; Average latency: "
<< (long)(total_avg_latency * 1000 / (sizeof(m_Queues) / sizeof(m_Queues[0]))) << "ms"
<< "; Threads: " << total_alive
<< "; Pool utilization: " << (total_utilization / (sizeof(m_Queues) / sizeof(m_Queues[0]))) << "%";
}
2013-03-23 12:23:13 +01:00
}
}
2013-03-23 12:23:13 +01:00
/**
 * Spawns a new worker thread in the first dead slot, if one is available.
 *
 * Note: Caller must hold m_Mutex
 *
 * @param group The thread group the new worker is added to.
 */
void ThreadPool::Queue::SpawnWorker(boost::thread_group& group)
{
	const size_t slots = sizeof(Threads) / sizeof(Threads[0]);

	for (size_t slot = 0; slot < slots; slot++) {
		if (Threads[slot].State != ThreadDead)
			continue;

		Log(LogDebug, "ThreadPool", "Spawning worker thread.");

		Threads[slot] = WorkerThread(ThreadIdle);
		Threads[slot].Thread = group.create_thread(boost::bind(&ThreadPool::WorkerThread::ThreadProc, boost::ref(Threads[slot]), boost::ref(*this)));

		break;
	}
}
2013-03-23 12:23:13 +01:00
/**
 * Marks one idle worker as a zombie so it terminates itself, and detaches
 * its thread from the group.
 *
 * Note: Caller must hold Mutex.
 *
 * @param group The thread group the worker is removed from.
 */
void ThreadPool::Queue::KillWorker(boost::thread_group& group)
{
	const size_t slots = sizeof(Threads) / sizeof(Threads[0]);

	for (size_t slot = 0; slot < slots; slot++) {
		if (Threads[slot].State != ThreadIdle || Threads[slot].Zombie)
			continue;

		Log(LogDebug, "ThreadPool", "Killing worker thread.");

		/* Detach the thread; it cleans itself up once it sees the Zombie flag. */
		group.remove_thread(Threads[slot].Thread);
		Threads[slot].Thread->detach();
		delete Threads[slot].Thread;

		Threads[slot].Zombie = true;
		CV.notify_all();

		break;
	}
}
/**
 * Folds the thread's current state into its exponentially-averaged
 * utilization (over a 5-second window) and optionally switches the
 * thread to a new state.
 *
 * Note: Caller must hold queue Mutex.
 *
 * @param state The new thread state (ThreadUnspecified keeps the current one).
 */
void ThreadPool::WorkerThread::UpdateUtilization(ThreadState state)
{
	double instant;

	switch (State) {
		case ThreadDead:
			/* Dead threads don't contribute to utilization. */
			return;
		case ThreadIdle:
			instant = 0;
			break;
		case ThreadBusy:
			instant = 1;
			break;
		default:
			VERIFY(0);
	}

	double now = Utility::GetTime();
	double span = now - LastUpdate;

	const double avg_time = 5.0;

	/* Clamp so a long gap doesn't produce a negative weight below. */
	if (span > avg_time)
		span = avg_time;

	Utilization = (Utilization * (avg_time - span) + instant * span) / avg_time;
	LastUpdate = now;

	if (state != ThreadUnspecified)
		State = state;
}