// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/waitable_event.h"

#include "base/condition_variable.h"
#include "base/lock.h"
#include "base/message_loop.h"

// -----------------------------------------------------------------------------
// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
// support cross-process events (where one process can signal an event which
// others are waiting on). Because of this, we can avoid having one thread per
// listener in several cases.
//
// The WaitableEvent maintains a list of waiters, protected by a lock. Each
// waiter is either an async wait, in which case we have a Task and the
// MessageLoop to run it on, or a blocking wait, in which case we have the
// condition variable to signal.
//
// Waiting involves grabbing the lock and adding oneself to the wait list. Async
// waits can be canceled, which means grabbing the lock and removing oneself
// from the list.
//
// Waiting on multiple events is handled by adding a single, synchronous wait to
// the wait-list of many events. An event passes a pointer to itself when
// firing a waiter and so we can store that pointer to find out which event
// triggered.
30// ----------------------------------------------------------------------------- 31 32namespace base { 33 34// ----------------------------------------------------------------------------- 35// This is just an abstract base class for waking the two types of waiters 36// ----------------------------------------------------------------------------- 37WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled) 38 : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) { 39} 40 41WaitableEvent::~WaitableEvent() { 42} 43 44void WaitableEvent::Reset() { 45 AutoLock locked(kernel_->lock_); 46 kernel_->signaled_ = false; 47} 48 49void WaitableEvent::Signal() { 50 AutoLock locked(kernel_->lock_); 51 52 if (kernel_->signaled_) 53 return; 54 55 if (kernel_->manual_reset_) { 56 SignalAll(); 57 kernel_->signaled_ = true; 58 } else { 59 // In the case of auto reset, if no waiters were woken, we remain 60 // signaled. 61 if (!SignalOne()) 62 kernel_->signaled_ = true; 63 } 64} 65 66bool WaitableEvent::IsSignaled() { 67 AutoLock locked(kernel_->lock_); 68 69 const bool result = kernel_->signaled_; 70 if (result && !kernel_->manual_reset_) 71 kernel_->signaled_ = false; 72 return result; 73} 74 75// ----------------------------------------------------------------------------- 76// Synchronous waits 77 78// ----------------------------------------------------------------------------- 79// This is a synchronous waiter. The thread is waiting on the given condition 80// variable and the fired flag in this object. 
81// ----------------------------------------------------------------------------- 82class SyncWaiter : public WaitableEvent::Waiter { 83 public: 84 SyncWaiter() 85 : fired_(false), 86 signaling_event_(NULL), 87 lock_(), 88 cv_(&lock_) { 89 } 90 91 bool Fire(WaitableEvent* signaling_event) { 92 AutoLock locked(lock_); 93 94 if (fired_) 95 return false; 96 97 fired_ = true; 98 signaling_event_ = signaling_event; 99 100 cv_.Broadcast(); 101 102 // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on 103 // the blocking thread's stack. There is no |delete this;| in Fire. The 104 // SyncWaiter object is destroyed when it goes out of scope. 105 106 return true; 107 } 108 109 WaitableEvent* signaling_event() const { 110 return signaling_event_; 111 } 112 113 // --------------------------------------------------------------------------- 114 // These waiters are always stack allocated and don't delete themselves. Thus 115 // there's no problem and the ABA tag is the same as the object pointer. 116 // --------------------------------------------------------------------------- 117 bool Compare(void* tag) { 118 return this == tag; 119 } 120 121 // --------------------------------------------------------------------------- 122 // Called with lock held. 123 // --------------------------------------------------------------------------- 124 bool fired() const { 125 return fired_; 126 } 127 128 // --------------------------------------------------------------------------- 129 // During a TimedWait, we need a way to make sure that an auto-reset 130 // WaitableEvent doesn't think that this event has been signaled between 131 // unlocking it and removing it from the wait-list. Called with lock held. 
132 // --------------------------------------------------------------------------- 133 void Disable() { 134 fired_ = true; 135 } 136 137 Lock* lock() { 138 return &lock_; 139 } 140 141 ConditionVariable* cv() { 142 return &cv_; 143 } 144 145 private: 146 bool fired_; 147 WaitableEvent* signaling_event_; // The WaitableEvent which woke us 148 Lock lock_; 149 ConditionVariable cv_; 150}; 151 152bool WaitableEvent::TimedWait(const TimeDelta& max_time) { 153 const Time end_time(Time::Now() + max_time); 154 const bool finite_time = max_time.ToInternalValue() >= 0; 155 156 kernel_->lock_.Acquire(); 157 if (kernel_->signaled_) { 158 if (!kernel_->manual_reset_) { 159 // In this case we were signaled when we had no waiters. Now that 160 // someone has waited upon us, we can automatically reset. 161 kernel_->signaled_ = false; 162 } 163 164 kernel_->lock_.Release(); 165 return true; 166 } 167 168 SyncWaiter sw; 169 sw.lock()->Acquire(); 170 171 Enqueue(&sw); 172 kernel_->lock_.Release(); 173 // We are violating locking order here by holding the SyncWaiter lock but not 174 // the WaitableEvent lock. However, this is safe because we don't lock @lock_ 175 // again before unlocking it. 176 177 for (;;) { 178 const Time current_time(Time::Now()); 179 180 if (sw.fired() || (finite_time && current_time >= end_time)) { 181 const bool return_value = sw.fired(); 182 183 // We can't acquire @lock_ before releasing the SyncWaiter lock (because 184 // of locking order), however, in between the two a signal could be fired 185 // and @sw would accept it, however we will still return false, so the 186 // signal would be lost on an auto-reset WaitableEvent. Thus we call 187 // Disable which makes sw::Fire return false. 
188 sw.Disable(); 189 sw.lock()->Release(); 190 191 kernel_->lock_.Acquire(); 192 kernel_->Dequeue(&sw, &sw); 193 kernel_->lock_.Release(); 194 195 return return_value; 196 } 197 198 if (finite_time) { 199 const TimeDelta max_wait(end_time - current_time); 200 sw.cv()->TimedWait(max_wait); 201 } else { 202 sw.cv()->Wait(); 203 } 204 } 205} 206 207bool WaitableEvent::Wait() { 208 return TimedWait(TimeDelta::FromSeconds(-1)); 209} 210 211// ----------------------------------------------------------------------------- 212 213 214// ----------------------------------------------------------------------------- 215// Synchronous waiting on multiple objects. 216 217static bool // StrictWeakOrdering 218cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a, 219 const std::pair<WaitableEvent*, unsigned> &b) { 220 return a.first < b.first; 221} 222 223// static 224size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, 225 size_t count) { 226 DCHECK(count) << "Cannot wait on no events"; 227 228 // We need to acquire the locks in a globally consistent order. Thus we sort 229 // the array of waitables by address. We actually sort a pairs so that we can 230 // map back to the original index values later. 231 std::vector<std::pair<WaitableEvent*, size_t> > waitables; 232 waitables.reserve(count); 233 for (size_t i = 0; i < count; ++i) 234 waitables.push_back(std::make_pair(raw_waitables[i], i)); 235 236 DCHECK_EQ(count, waitables.size()); 237 238 sort(waitables.begin(), waitables.end(), cmp_fst_addr); 239 240 // The set of waitables must be distinct. Since we have just sorted by 241 // address, we can check this cheaply by comparing pairs of consecutive 242 // elements. 243 for (size_t i = 0; i < waitables.size() - 1; ++i) { 244 DCHECK(waitables[i].first != waitables[i+1].first); 245 } 246 247 SyncWaiter sw; 248 249 const size_t r = EnqueueMany(&waitables[0], count, &sw); 250 if (r) { 251 // One of the events is already signaled. 
The SyncWaiter has not been 252 // enqueued anywhere. EnqueueMany returns the count of remaining waitables 253 // when the signaled one was seen, so the index of the signaled event is 254 // @count - @r. 255 return waitables[count - r].second; 256 } 257 258 // At this point, we hold the locks on all the WaitableEvents and we have 259 // enqueued our waiter in them all. 260 sw.lock()->Acquire(); 261 // Release the WaitableEvent locks in the reverse order 262 for (size_t i = 0; i < count; ++i) { 263 waitables[count - (1 + i)].first->kernel_->lock_.Release(); 264 } 265 266 for (;;) { 267 if (sw.fired()) 268 break; 269 270 sw.cv()->Wait(); 271 } 272 sw.lock()->Release(); 273 274 // The address of the WaitableEvent which fired is stored in the SyncWaiter. 275 WaitableEvent *const signaled_event = sw.signaling_event(); 276 // This will store the index of the raw_waitables which fired. 277 size_t signaled_index = 0; 278 279 // Take the locks of each WaitableEvent in turn (except the signaled one) and 280 // remove our SyncWaiter from the wait-list 281 for (size_t i = 0; i < count; ++i) { 282 if (raw_waitables[i] != signaled_event) { 283 raw_waitables[i]->kernel_->lock_.Acquire(); 284 // There's no possible ABA issue with the address of the SyncWaiter here 285 // because it lives on the stack. Thus the tag value is just the pointer 286 // value again. 287 raw_waitables[i]->kernel_->Dequeue(&sw, &sw); 288 raw_waitables[i]->kernel_->lock_.Release(); 289 } else { 290 signaled_index = i; 291 } 292 } 293 294 return signaled_index; 295} 296 297// ----------------------------------------------------------------------------- 298// If return value == 0: 299// The locks of the WaitableEvents have been taken in order and the Waiter has 300// been enqueued in the wait-list of each. None of the WaitableEvents are 301// currently signaled 302// else: 303// None of the WaitableEvent locks are held. 
The Waiter has not been enqueued 304// in any of them and the return value is the index of the first WaitableEvent 305// which was signaled, from the end of the array. 306// ----------------------------------------------------------------------------- 307// static 308size_t WaitableEvent::EnqueueMany 309 (std::pair<WaitableEvent*, size_t>* waitables, 310 size_t count, Waiter* waiter) { 311 if (!count) 312 return 0; 313 314 waitables[0].first->kernel_->lock_.Acquire(); 315 if (waitables[0].first->kernel_->signaled_) { 316 if (!waitables[0].first->kernel_->manual_reset_) 317 waitables[0].first->kernel_->signaled_ = false; 318 waitables[0].first->kernel_->lock_.Release(); 319 return count; 320 } 321 322 const size_t r = EnqueueMany(waitables + 1, count - 1, waiter); 323 if (r) { 324 waitables[0].first->kernel_->lock_.Release(); 325 } else { 326 waitables[0].first->Enqueue(waiter); 327 } 328 329 return r; 330} 331 332// ----------------------------------------------------------------------------- 333 334 335// ----------------------------------------------------------------------------- 336// Private functions... 337 338// ----------------------------------------------------------------------------- 339// Wake all waiting waiters. Called with lock held. 340// ----------------------------------------------------------------------------- 341bool WaitableEvent::SignalAll() { 342 bool signaled_at_least_one = false; 343 344 for (std::list<Waiter*>::iterator 345 i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) { 346 if ((*i)->Fire(this)) 347 signaled_at_least_one = true; 348 } 349 350 kernel_->waiters_.clear(); 351 return signaled_at_least_one; 352} 353 354// --------------------------------------------------------------------------- 355// Try to wake a single waiter. Return true if one was woken. Called with lock 356// held. 
357// --------------------------------------------------------------------------- 358bool WaitableEvent::SignalOne() { 359 for (;;) { 360 if (kernel_->waiters_.empty()) 361 return false; 362 363 const bool r = (*kernel_->waiters_.begin())->Fire(this); 364 kernel_->waiters_.pop_front(); 365 if (r) 366 return true; 367 } 368} 369 370// ----------------------------------------------------------------------------- 371// Add a waiter to the list of those waiting. Called with lock held. 372// ----------------------------------------------------------------------------- 373void WaitableEvent::Enqueue(Waiter* waiter) { 374 kernel_->waiters_.push_back(waiter); 375} 376 377// ----------------------------------------------------------------------------- 378// Remove a waiter from the list of those waiting. Return true if the waiter was 379// actually removed. Called with lock held. 380// ----------------------------------------------------------------------------- 381bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) { 382 for (std::list<Waiter*>::iterator 383 i = waiters_.begin(); i != waiters_.end(); ++i) { 384 if (*i == waiter && (*i)->Compare(tag)) { 385 waiters_.erase(i); 386 return true; 387 } 388 } 389 390 return false; 391} 392 393// ----------------------------------------------------------------------------- 394 395} // namespace base 396