teak-llvm/llvm/unittests/Support/ThreadPool.cpp
Zachary Turner 1b76a128a8 Enable ThreadPool to support tasks that return values.
Previously ThreadPool could only queue async "jobs", i.e. work
that was done for its side effects and not for its result.  It's
useful occasionally to queue async work that returns a value.
From an API perspective, this is very intuitive.  The previous
API just returned a shared_future<void>, so all we need to do is
make it return a shared_future<T>, where T is the type of value
that the operation returns.
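
As a sketch of the resulting usage (the lambda and values here are
illustrative, not part of the patch; see the TaskWithResult test in the
file below for the real thing):

    ThreadPool Pool;
    // The future's type follows the callable's return type.
    std::shared_future<int> Future = Pool.async([] { return 42; });
    int Result = Future.get(); // Result == 42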

Making this work required a little magic, but ultimately it's not
too bad.  Instead of keeping a shared queue<packaged_task<void()>>
we just keep a shared queue<unique_ptr<TaskBase>>, where TaskBase
is a class with a pure virtual execute() method, then have a
templated derived class that stores a packaged_task<T()>.  Everything
else works out pretty cleanly.
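
In rough outline, the type erasure looks something like the sketch below
(TaskBase is named above; the derived class, member names, and the bare
queue are illustrative and may differ from the actual ThreadPool.h):

    #include <future>
    #include <memory>
    #include <queue>

    // Type-erased base so the queue need not know each task's result type.
    struct TaskBase {
      virtual ~TaskBase() = default;
      virtual void execute() = 0;
    };

    // Templated derived class owning the packaged_task for a given T.
    template <typename T> struct TypedTask : TaskBase {
      explicit TypedTask(std::packaged_task<T()> Task) : Task(std::move(Task)) {}
      void execute() override { Task(); }
      std::packaged_task<T()> Task;
    };

    // The shared queue stores tasks regardless of their return type.
    std::queue<std::unique_ptr<TaskBase>> Tasks;

The shared_future<T> handed back to the caller would be taken from the
packaged_task before it is wrapped and pushed onto the queue.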

Differential Revision: https://reviews.llvm.org/D48115

llvm-svn: 334643
2018-06-13 19:29:16 +00:00

//========- unittests/Support/ThreadPool.cpp - ThreadPool.h tests ---========//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/ThreadPool.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetSelect.h"
#include "gtest/gtest.h"
using namespace llvm;
// Fixture for the unittests, allowing us to *temporarily* disable the tests
// on a particular platform.
class ThreadPoolTest : public testing::Test {
  Triple Host;
  SmallVector<Triple::ArchType, 4> UnsupportedArchs;
  SmallVector<Triple::OSType, 4> UnsupportedOSs;
  SmallVector<Triple::EnvironmentType, 1> UnsupportedEnvironments;

protected:
  // This is intended as a temporary "XFAIL" for a platform.
  bool isUnsupportedOSOrEnvironment() {
    Triple Host(Triple::normalize(sys::getProcessTriple()));
    if (find(UnsupportedEnvironments, Host.getEnvironment()) !=
        UnsupportedEnvironments.end())
      return true;
    if (is_contained(UnsupportedOSs, Host.getOS()))
      return true;
    if (is_contained(UnsupportedArchs, Host.getArch()))
      return true;
    return false;
  }

  ThreadPoolTest() {
    // Add unsupported configurations here, for example:
    //   UnsupportedArchs.push_back(Triple::x86_64);

    // See https://llvm.org/bugs/show_bug.cgi?id=25829
    UnsupportedArchs.push_back(Triple::ppc64le);
    UnsupportedArchs.push_back(Triple::ppc64);
  }

  /// Make sure this thread does not progress faster than the main thread.
  void waitForMainThread() {
    std::unique_lock<std::mutex> LockGuard(WaitMainThreadMutex);
    WaitMainThread.wait(LockGuard, [&] { return MainThreadReady; });
  }

  /// Set the readiness of the main thread.
  void setMainThreadReady() {
    {
      std::unique_lock<std::mutex> LockGuard(WaitMainThreadMutex);
      MainThreadReady = true;
    }
    WaitMainThread.notify_all();
  }

  void SetUp() override { MainThreadReady = false; }

  std::condition_variable WaitMainThread;
  std::mutex WaitMainThreadMutex;
  bool MainThreadReady;
};

#define CHECK_UNSUPPORTED()                                                    \
  do {                                                                         \
    if (isUnsupportedOSOrEnvironment())                                        \
      return;                                                                  \
  } while (0)

TEST_F(ThreadPoolTest, AsyncBarrier) {
  CHECK_UNSUPPORTED();
  // Test that async & barrier work together properly.
  std::atomic_int checked_in{0};

  ThreadPool Pool;
  for (size_t i = 0; i < 5; ++i) {
    Pool.async([this, &checked_in] {
      waitForMainThread();
      ++checked_in;
    });
  }
  ASSERT_EQ(0, checked_in);
  setMainThreadReady();
  Pool.wait();
  ASSERT_EQ(5, checked_in);
}

static void TestFunc(std::atomic_int &checked_in, int i) { checked_in += i; }

TEST_F(ThreadPoolTest, AsyncBarrierArgs) {
  CHECK_UNSUPPORTED();
  // Test that async works with a function requiring multiple parameters.
  std::atomic_int checked_in{0};

  ThreadPool Pool;
  for (size_t i = 0; i < 5; ++i) {
    Pool.async(TestFunc, std::ref(checked_in), i);
  }
  Pool.wait();
  ASSERT_EQ(10, checked_in);
}

TEST_F(ThreadPoolTest, Async) {
  CHECK_UNSUPPORTED();
  ThreadPool Pool;
  std::atomic_int i{0};
  Pool.async([this, &i] {
    waitForMainThread();
    ++i;
  });
  Pool.async([&i] { ++i; });
  ASSERT_NE(2, i.load());
  setMainThreadReady();
  Pool.wait();
  ASSERT_EQ(2, i.load());
}

TEST_F(ThreadPoolTest, GetFuture) {
  CHECK_UNSUPPORTED();
  ThreadPool Pool{2};
  std::atomic_int i{0};
  Pool.async([this, &i] {
    waitForMainThread();
    ++i;
  });
  // Force the future using get()
  Pool.async([&i] { ++i; }).get();
  ASSERT_NE(2, i.load());
  setMainThreadReady();
  Pool.wait();
  ASSERT_EQ(2, i.load());
}

TEST_F(ThreadPoolTest, TaskWithResult) {
  CHECK_UNSUPPORTED();
  // By making only 1 thread in the pool the two tasks are serialized with
  // respect to each other, which means that the second one must return 2.
  ThreadPool Pool{1};
  std::atomic_int i{0};
  Pool.async([this, &i] {
    waitForMainThread();
    ++i;
  });

  // Force the future using get()
  std::shared_future<int> Future = Pool.async([&i] { return ++i; });
  ASSERT_EQ(0, i.load());
  setMainThreadReady();
  int Result = Future.get();
  ASSERT_EQ(2, i.load());
  ASSERT_EQ(2, Result);
}

TEST_F(ThreadPoolTest, PoolDestruction) {
  CHECK_UNSUPPORTED();
  // Test that we are waiting on destruction.
  std::atomic_int checked_in{0};
  {
    ThreadPool Pool;
    for (size_t i = 0; i < 5; ++i) {
      Pool.async([this, &checked_in] {
        waitForMainThread();
        ++checked_in;
      });
    }
    ASSERT_EQ(0, checked_in);
    setMainThreadReady();
  }
  ASSERT_EQ(5, checked_in);
}