WorkQueue#ParallelFor(): optionally don't pre-glue items together into chunks of differing sizes

... so that the load is distributed equally across the workers.
This commit is contained in:
Alexander A. Klimov 2021-07-26 11:40:43 +02:00
parent 2b04e0a754
commit 8731d84299
2 changed files with 11 additions and 4 deletions

View File

@ -67,18 +67,25 @@ public:
/**
 * Convenience overload: runs func over every element of items in parallel.
 *
 * Forwards to the three-argument overload with preChunk = true, i.e. the
 * items are pre-glued into one contiguous chunk per worker thread.
 *
 * @param items Container of work items (anything with size() and indexing
 *              compatible with the three-argument overload).
 * @param func  Callable invoked for each item.
 */
template<typename VectorType, typename FuncType>
void ParallelFor(const VectorType& items, const FuncType& func)
{
ParallelFor(items, true, func);
}
template<typename VectorType, typename FuncType>
void ParallelFor(const VectorType& items, bool preChunk, const FuncType& func)
{
using SizeType = decltype(items.size());
SizeType totalCount = items.size();
SizeType chunks = preChunk ? m_ThreadCount : totalCount;
auto lock = AcquireLock();
SizeType offset = 0;
for (int i = 0; i < m_ThreadCount; i++) {
SizeType count = totalCount / static_cast<SizeType>(m_ThreadCount);
if (static_cast<SizeType>(i) < totalCount % static_cast<SizeType>(m_ThreadCount))
for (SizeType i = 0; i < chunks; i++) {
SizeType count = totalCount / chunks;
if (i < totalCount % chunks)
count++;
EnqueueUnlocked(lock, [&items, func, offset, count, this]() {

View File

@ -163,7 +163,7 @@ void IcingaDB::UpdateAllConfigObjects()
m_DumpedGlobals.IconImage.Reset();
});
upq.ParallelFor(types, [this](const Type::Ptr& type) {
upq.ParallelFor(types, false, [this](const Type::Ptr& type) {
String lcType = type->GetName().ToLower();
ConfigType *ctype = dynamic_cast<ConfigType *>(type.get());
if (!ctype)