//==-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a crude C++11 based thread pool.
//
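// Example usage (a sketch, assuming the async() entry points declared in
// llvm/Support/ThreadPool.h):
//
//   ThreadPool Pool;                // defaults to hardware_concurrency()
//   auto Future = Pool.async([] { /* do some work */ });
//   Pool.wait();                    // blocks until every queued task has run
//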
//===----------------------------------------------------------------------===//

#include "llvm/Support/ThreadPool.h"

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#if LLVM_ENABLE_THREADS

// Default to std::thread::hardware_concurrency
ThreadPool::ThreadPool() : ThreadPool(std::thread::hardware_concurrency()) {}

ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0), EnableFlag(true) {
  // Create ThreadCount threads that loop forever, waiting on QueueCondition
  // for tasks to be queued or for the pool to be destroyed.
  Threads.reserve(ThreadCount);
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
    Threads.emplace_back([&] {
      while (true) {
        PackagedTaskTy Task;
        {
          std::unique_lock<std::mutex> LockGuard(QueueLock);
          // Wait for tasks to be pushed in the queue
          QueueCondition.wait(LockGuard,
                              [&] { return !EnableFlag || !Tasks.empty(); });
          // Exit condition
          if (!EnableFlag && Tasks.empty())
            return;
          // We have a task: grab it and release the lock on the queue.

          // Signal that we are active before popping the queue, so that
          // wait() can detect that a task is still in flight even when the
          // queue is empty.
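          // Briefly taking CompletionLock below synchronizes with wait(): a
          // waiter evaluating its predicate under that lock either still sees
          // this task in the queue or already sees ActiveThreads incremented.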
          {
            ++ActiveThreads;
            std::unique_lock<std::mutex> LockGuard(CompletionLock);
          }
          Task = std::move(Tasks.front());
          Tasks.pop();
        }
        // Run the task we just grabbed
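        // The MSVC branch exists because MSVC 2013-era std::packaged_task has
        // trouble with a void() signature; on that path PackagedTaskTy takes
        // and returns a dummy bool (see the typedefs in ThreadPool.h).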
#ifndef _MSC_VER
        Task();
#else
        Task(/* unused */ false);
#endif

        {
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
          std::unique_lock<std::mutex> LockGuard(CompletionLock);
          --ActiveThreads;
        }

        // Notify task completion, in case someone waits on ThreadPool::wait()
        CompletionCondition.notify_all();
      }
    });
  }
}

void ThreadPool::wait() {
  // Wait for all threads to complete and the queue to be empty
  std::unique_lock<std::mutex> LockGuard(CompletionLock);
  // The order of the checks for ActiveThreads and Tasks.empty() matters
  // because an active thread might be modifying the Tasks queue, and that
  // would be a race.
  CompletionCondition.wait(LockGuard,
                           [&] { return !ActiveThreads && Tasks.empty(); });
}

std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
  // Wrap the Task in a packaged_task to return a future object.
  PackagedTaskTy PackagedTask(std::move(Task));
  auto Future = PackagedTask.get_future();
  {
    // Lock the queue and push the new task
    std::unique_lock<std::mutex> LockGuard(QueueLock);

    // Don't allow enqueueing after disabling the pool
    assert(EnableFlag && "Queuing a task during ThreadPool destruction");

    Tasks.push(std::move(PackagedTask));
  }
  QueueCondition.notify_one();
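  // Share the future so the caller can copy it and wait on the result
  // independently of the packaged task now sitting in the queue.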
  return Future.share();
}

// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
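  // Wake every worker; each one exits its loop once it observes that
  // EnableFlag is false and the task queue has been drained.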
  QueueCondition.notify_all();
  for (auto &Worker : Threads)
    Worker.join();
}

#else // LLVM_ENABLE_THREADS Disabled

ThreadPool::ThreadPool() : ThreadPool(0) {}

// No threads are launched; issue a warning if ThreadCount is not 0.
ThreadPool::ThreadPool(unsigned ThreadCount) : ActiveThreads(0) {
  if (ThreadCount) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void ThreadPool::wait() {
  // Sequential implementation running the tasks
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front());
    Tasks.pop();
#ifndef _MSC_VER
    Task();
#else
    Task(/* unused */ false);
#endif
  }
}

std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
#ifndef _MSC_VER
  // Get a Future with launch::deferred execution using std::async
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
  // Wrap the future so that ThreadPool::wait() can run the task and the
  // returned future can still be waited on.
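  // With std::launch::deferred the task body runs lazily, on the first get()
  // or wait() issued on any copy of the shared future (the one captured by
  // the wrapper queued below, or the one returned to the caller).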
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
#else
  auto Future =
      std::async(std::launch::deferred, std::move(Task), false).share();
  PackagedTaskTy PackagedTask([Future](bool) -> bool {
    Future.get();
    return false;
  });
#endif
  Tasks.push(std::move(PackagedTask));
  return Future;
}

ThreadPool::~ThreadPool() {
  wait();
}

#endif