1// Copyright (c) 2012 The Chromium Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include <stddef.h> 6 7#include <algorithm> 8#include <vector> 9 10#include "base/logging.h" 11#include "base/synchronization/condition_variable.h" 12#include "base/synchronization/lock.h" 13#include "base/synchronization/waitable_event.h" 14#include "base/threading/thread_restrictions.h" 15 16// ----------------------------------------------------------------------------- 17// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't 18// support cross-process events (where one process can signal an event which 19// others are waiting on). Because of this, we can avoid having one thread per 20// listener in several cases. 21// 22// The WaitableEvent maintains a list of waiters, protected by a lock. Each 23// waiter is either an async wait, in which case we have a Task and the 24// MessageLoop to run it on, or a blocking wait, in which case we have the 25// condition variable to signal. 26// 27// Waiting involves grabbing the lock and adding oneself to the wait list. Async 28// waits can be canceled, which means grabbing the lock and removing oneself 29// from the list. 30// 31// Waiting on multiple events is handled by adding a single, synchronous wait to 32// the wait-list of many events. An event passes a pointer to itself when 33// firing a waiter and so we can store that pointer to find out which event 34// triggered. 
// -----------------------------------------------------------------------------

namespace base {

// -----------------------------------------------------------------------------
// All of the event's state (the signaled flag, the reset mode and the
// wait-list of Waiters) lives in a separate, heap-allocated kernel object,
// guarded by kernel_->lock_.
// -----------------------------------------------------------------------------
WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
    : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {
}

WaitableEvent::~WaitableEvent() {
}

// Puts the event back into the unsignaled state.
void WaitableEvent::Reset() {
  base::AutoLock locked(kernel_->lock_);
  kernel_->signaled_ = false;
}

// Wakes waiters. For a manual-reset event every waiter is woken and the event
// stays signaled until Reset(); for an auto-reset event at most one waiter is
// woken, and the event only remains signaled if no waiter was actually woken.
void WaitableEvent::Signal() {
  base::AutoLock locked(kernel_->lock_);

  // Already signaled: nothing to do. Any waiters were dealt with when the
  // event first became signaled.
  if (kernel_->signaled_)
    return;

  if (kernel_->manual_reset_) {
    SignalAll();
    kernel_->signaled_ = true;
  } else {
    // In the case of auto reset, if no waiters were woken, we remain
    // signaled.
    if (!SignalOne())
      kernel_->signaled_ = true;
  }
}

// Returns whether the event is currently signaled. Note the side effect: for
// an auto-reset event a true result consumes the signal (clears the flag),
// exactly as a successful wait would.
bool WaitableEvent::IsSignaled() {
  base::AutoLock locked(kernel_->lock_);

  const bool result = kernel_->signaled_;
  if (result && !kernel_->manual_reset_)
    kernel_->signaled_ = false;
  return result;
}

// -----------------------------------------------------------------------------
// Synchronous waits

// -----------------------------------------------------------------------------
// This is a synchronous waiter. The thread is waiting on the given condition
// variable and the fired flag in this object.
86// ----------------------------------------------------------------------------- 87class SyncWaiter : public WaitableEvent::Waiter { 88 public: 89 SyncWaiter() 90 : fired_(false), 91 signaling_event_(NULL), 92 lock_(), 93 cv_(&lock_) { 94 } 95 96 bool Fire(WaitableEvent* signaling_event) override { 97 base::AutoLock locked(lock_); 98 99 if (fired_) 100 return false; 101 102 fired_ = true; 103 signaling_event_ = signaling_event; 104 105 cv_.Broadcast(); 106 107 // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on 108 // the blocking thread's stack. There is no |delete this;| in Fire. The 109 // SyncWaiter object is destroyed when it goes out of scope. 110 111 return true; 112 } 113 114 WaitableEvent* signaling_event() const { 115 return signaling_event_; 116 } 117 118 // --------------------------------------------------------------------------- 119 // These waiters are always stack allocated and don't delete themselves. Thus 120 // there's no problem and the ABA tag is the same as the object pointer. 121 // --------------------------------------------------------------------------- 122 bool Compare(void* tag) override { return this == tag; } 123 124 // --------------------------------------------------------------------------- 125 // Called with lock held. 126 // --------------------------------------------------------------------------- 127 bool fired() const { 128 return fired_; 129 } 130 131 // --------------------------------------------------------------------------- 132 // During a TimedWait, we need a way to make sure that an auto-reset 133 // WaitableEvent doesn't think that this event has been signaled between 134 // unlocking it and removing it from the wait-list. Called with lock held. 
135 // --------------------------------------------------------------------------- 136 void Disable() { 137 fired_ = true; 138 } 139 140 base::Lock* lock() { 141 return &lock_; 142 } 143 144 base::ConditionVariable* cv() { 145 return &cv_; 146 } 147 148 private: 149 bool fired_; 150 WaitableEvent* signaling_event_; // The WaitableEvent which woke us 151 base::Lock lock_; 152 base::ConditionVariable cv_; 153}; 154 155void WaitableEvent::Wait() { 156 bool result = TimedWait(TimeDelta::FromSeconds(-1)); 157 DCHECK(result) << "TimedWait() should never fail with infinite timeout"; 158} 159 160bool WaitableEvent::TimedWait(const TimeDelta& max_time) { 161 base::ThreadRestrictions::AssertWaitAllowed(); 162 const TimeTicks end_time(TimeTicks::Now() + max_time); 163 const bool finite_time = max_time.ToInternalValue() >= 0; 164 165 kernel_->lock_.Acquire(); 166 if (kernel_->signaled_) { 167 if (!kernel_->manual_reset_) { 168 // In this case we were signaled when we had no waiters. Now that 169 // someone has waited upon us, we can automatically reset. 170 kernel_->signaled_ = false; 171 } 172 173 kernel_->lock_.Release(); 174 return true; 175 } 176 177 SyncWaiter sw; 178 sw.lock()->Acquire(); 179 180 Enqueue(&sw); 181 kernel_->lock_.Release(); 182 // We are violating locking order here by holding the SyncWaiter lock but not 183 // the WaitableEvent lock. However, this is safe because we don't lock @lock_ 184 // again before unlocking it. 185 186 for (;;) { 187 const TimeTicks current_time(TimeTicks::Now()); 188 189 if (sw.fired() || (finite_time && current_time >= end_time)) { 190 const bool return_value = sw.fired(); 191 192 // We can't acquire @lock_ before releasing the SyncWaiter lock (because 193 // of locking order), however, in between the two a signal could be fired 194 // and @sw would accept it, however we will still return false, so the 195 // signal would be lost on an auto-reset WaitableEvent. Thus we call 196 // Disable which makes sw::Fire return false. 
197 sw.Disable(); 198 sw.lock()->Release(); 199 200 // This is a bug that has been enshrined in the interface of 201 // WaitableEvent now: |Dequeue| is called even when |sw.fired()| is true, 202 // even though it'll always return false in that case. However, taking 203 // the lock ensures that |Signal| has completed before we return and 204 // means that a WaitableEvent can synchronise its own destruction. 205 kernel_->lock_.Acquire(); 206 kernel_->Dequeue(&sw, &sw); 207 kernel_->lock_.Release(); 208 209 return return_value; 210 } 211 212 if (finite_time) { 213 const TimeDelta max_wait(end_time - current_time); 214 sw.cv()->TimedWait(max_wait); 215 } else { 216 sw.cv()->Wait(); 217 } 218 } 219} 220 221// ----------------------------------------------------------------------------- 222// Synchronous waiting on multiple objects. 223 224static bool // StrictWeakOrdering 225cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a, 226 const std::pair<WaitableEvent*, unsigned> &b) { 227 return a.first < b.first; 228} 229 230// static 231size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, 232 size_t count) { 233 base::ThreadRestrictions::AssertWaitAllowed(); 234 DCHECK(count) << "Cannot wait on no events"; 235 236 // We need to acquire the locks in a globally consistent order. Thus we sort 237 // the array of waitables by address. We actually sort a pairs so that we can 238 // map back to the original index values later. 239 std::vector<std::pair<WaitableEvent*, size_t> > waitables; 240 waitables.reserve(count); 241 for (size_t i = 0; i < count; ++i) 242 waitables.push_back(std::make_pair(raw_waitables[i], i)); 243 244 DCHECK_EQ(count, waitables.size()); 245 246 sort(waitables.begin(), waitables.end(), cmp_fst_addr); 247 248 // The set of waitables must be distinct. Since we have just sorted by 249 // address, we can check this cheaply by comparing pairs of consecutive 250 // elements. 
251 for (size_t i = 0; i < waitables.size() - 1; ++i) { 252 DCHECK(waitables[i].first != waitables[i+1].first); 253 } 254 255 SyncWaiter sw; 256 257 const size_t r = EnqueueMany(&waitables[0], count, &sw); 258 if (r) { 259 // One of the events is already signaled. The SyncWaiter has not been 260 // enqueued anywhere. EnqueueMany returns the count of remaining waitables 261 // when the signaled one was seen, so the index of the signaled event is 262 // @count - @r. 263 return waitables[count - r].second; 264 } 265 266 // At this point, we hold the locks on all the WaitableEvents and we have 267 // enqueued our waiter in them all. 268 sw.lock()->Acquire(); 269 // Release the WaitableEvent locks in the reverse order 270 for (size_t i = 0; i < count; ++i) { 271 waitables[count - (1 + i)].first->kernel_->lock_.Release(); 272 } 273 274 for (;;) { 275 if (sw.fired()) 276 break; 277 278 sw.cv()->Wait(); 279 } 280 sw.lock()->Release(); 281 282 // The address of the WaitableEvent which fired is stored in the SyncWaiter. 283 WaitableEvent *const signaled_event = sw.signaling_event(); 284 // This will store the index of the raw_waitables which fired. 285 size_t signaled_index = 0; 286 287 // Take the locks of each WaitableEvent in turn (except the signaled one) and 288 // remove our SyncWaiter from the wait-list 289 for (size_t i = 0; i < count; ++i) { 290 if (raw_waitables[i] != signaled_event) { 291 raw_waitables[i]->kernel_->lock_.Acquire(); 292 // There's no possible ABA issue with the address of the SyncWaiter here 293 // because it lives on the stack. Thus the tag value is just the pointer 294 // value again. 295 raw_waitables[i]->kernel_->Dequeue(&sw, &sw); 296 raw_waitables[i]->kernel_->lock_.Release(); 297 } else { 298 // By taking this lock here we ensure that |Signal| has completed by the 299 // time we return, because |Signal| holds this lock. This matches the 300 // behaviour of |Wait| and |TimedWait|. 
301 raw_waitables[i]->kernel_->lock_.Acquire(); 302 raw_waitables[i]->kernel_->lock_.Release(); 303 signaled_index = i; 304 } 305 } 306 307 return signaled_index; 308} 309 310// ----------------------------------------------------------------------------- 311// If return value == 0: 312// The locks of the WaitableEvents have been taken in order and the Waiter has 313// been enqueued in the wait-list of each. None of the WaitableEvents are 314// currently signaled 315// else: 316// None of the WaitableEvent locks are held. The Waiter has not been enqueued 317// in any of them and the return value is the index of the first WaitableEvent 318// which was signaled, from the end of the array. 319// ----------------------------------------------------------------------------- 320// static 321size_t WaitableEvent::EnqueueMany 322 (std::pair<WaitableEvent*, size_t>* waitables, 323 size_t count, Waiter* waiter) { 324 if (!count) 325 return 0; 326 327 waitables[0].first->kernel_->lock_.Acquire(); 328 if (waitables[0].first->kernel_->signaled_) { 329 if (!waitables[0].first->kernel_->manual_reset_) 330 waitables[0].first->kernel_->signaled_ = false; 331 waitables[0].first->kernel_->lock_.Release(); 332 return count; 333 } 334 335 const size_t r = EnqueueMany(waitables + 1, count - 1, waiter); 336 if (r) { 337 waitables[0].first->kernel_->lock_.Release(); 338 } else { 339 waitables[0].first->Enqueue(waiter); 340 } 341 342 return r; 343} 344 345// ----------------------------------------------------------------------------- 346 347 348// ----------------------------------------------------------------------------- 349// Private functions... 
350 351WaitableEvent::WaitableEventKernel::WaitableEventKernel(bool manual_reset, 352 bool initially_signaled) 353 : manual_reset_(manual_reset), 354 signaled_(initially_signaled) { 355} 356 357WaitableEvent::WaitableEventKernel::~WaitableEventKernel() { 358} 359 360// ----------------------------------------------------------------------------- 361// Wake all waiting waiters. Called with lock held. 362// ----------------------------------------------------------------------------- 363bool WaitableEvent::SignalAll() { 364 bool signaled_at_least_one = false; 365 366 for (std::list<Waiter*>::iterator 367 i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) { 368 if ((*i)->Fire(this)) 369 signaled_at_least_one = true; 370 } 371 372 kernel_->waiters_.clear(); 373 return signaled_at_least_one; 374} 375 376// --------------------------------------------------------------------------- 377// Try to wake a single waiter. Return true if one was woken. Called with lock 378// held. 379// --------------------------------------------------------------------------- 380bool WaitableEvent::SignalOne() { 381 for (;;) { 382 if (kernel_->waiters_.empty()) 383 return false; 384 385 const bool r = (*kernel_->waiters_.begin())->Fire(this); 386 kernel_->waiters_.pop_front(); 387 if (r) 388 return true; 389 } 390} 391 392// ----------------------------------------------------------------------------- 393// Add a waiter to the list of those waiting. Called with lock held. 394// ----------------------------------------------------------------------------- 395void WaitableEvent::Enqueue(Waiter* waiter) { 396 kernel_->waiters_.push_back(waiter); 397} 398 399// ----------------------------------------------------------------------------- 400// Remove a waiter from the list of those waiting. Return true if the waiter was 401// actually removed. Called with lock held. 
402// ----------------------------------------------------------------------------- 403bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) { 404 for (std::list<Waiter*>::iterator 405 i = waiters_.begin(); i != waiters_.end(); ++i) { 406 if (*i == waiter && (*i)->Compare(tag)) { 407 waiters_.erase(i); 408 return true; 409 } 410 } 411 412 return false; 413} 414 415// ----------------------------------------------------------------------------- 416 417} // namespace base 418