// waitable_event_posix.cc -- revision 28480d4f48373da735986b2a75e099d3cfddab3e
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/synchronization/waitable_event.h"

#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/message_loop.h"

// -----------------------------------------------------------------------------
// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
// support cross-process events (where one process can signal an event which
// others are waiting on). Because of this, we can avoid having one thread per
// listener in several cases.
//
// The WaitableEvent maintains a list of waiters, protected by a lock. Each
// waiter is either an async wait, in which case we have a Task and the
// MessageLoop to run it on, or a blocking wait, in which case we have the
// condition variable to signal.
//
// Waiting involves grabbing the lock and adding oneself to the wait list. Async
// waits can be canceled, which means grabbing the lock and removing oneself
// from the list.
//
// Waiting on multiple events is handled by adding a single, synchronous wait to
// the wait-list of many events. An event passes a pointer to itself when
// firing a waiter and so we can store that pointer to find out which event
// triggered.
30// ----------------------------------------------------------------------------- 31 32namespace base { 33 34// ----------------------------------------------------------------------------- 35// This is just an abstract base class for waking the two types of waiters 36// ----------------------------------------------------------------------------- 37WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled) 38 : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) { 39} 40 41WaitableEvent::~WaitableEvent() { 42} 43 44void WaitableEvent::Reset() { 45 base::AutoLock locked(kernel_->lock_); 46 kernel_->signaled_ = false; 47} 48 49void WaitableEvent::Signal() { 50 base::AutoLock locked(kernel_->lock_); 51 52 if (kernel_->signaled_) 53 return; 54 55 if (kernel_->manual_reset_) { 56 SignalAll(); 57 kernel_->signaled_ = true; 58 } else { 59 // In the case of auto reset, if no waiters were woken, we remain 60 // signaled. 61 if (!SignalOne()) 62 kernel_->signaled_ = true; 63 } 64} 65 66bool WaitableEvent::IsSignaled() { 67 base::AutoLock locked(kernel_->lock_); 68 69 const bool result = kernel_->signaled_; 70 if (result && !kernel_->manual_reset_) 71 kernel_->signaled_ = false; 72 return result; 73} 74 75// ----------------------------------------------------------------------------- 76// Synchronous waits 77 78// ----------------------------------------------------------------------------- 79// This is a synchronous waiter. The thread is waiting on the given condition 80// variable and the fired flag in this object. 
81// ----------------------------------------------------------------------------- 82class SyncWaiter : public WaitableEvent::Waiter { 83 public: 84 SyncWaiter() 85 : fired_(false), 86 signaling_event_(NULL), 87 lock_(), 88 cv_(&lock_) { 89 } 90 91 bool Fire(WaitableEvent* signaling_event) { 92 base::AutoLock locked(lock_); 93 94 if (fired_) 95 return false; 96 97 fired_ = true; 98 signaling_event_ = signaling_event; 99 100 cv_.Broadcast(); 101 102 // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on 103 // the blocking thread's stack. There is no |delete this;| in Fire. The 104 // SyncWaiter object is destroyed when it goes out of scope. 105 106 return true; 107 } 108 109 WaitableEvent* signaling_event() const { 110 return signaling_event_; 111 } 112 113 // --------------------------------------------------------------------------- 114 // These waiters are always stack allocated and don't delete themselves. Thus 115 // there's no problem and the ABA tag is the same as the object pointer. 116 // --------------------------------------------------------------------------- 117 bool Compare(void* tag) { 118 return this == tag; 119 } 120 121 // --------------------------------------------------------------------------- 122 // Called with lock held. 123 // --------------------------------------------------------------------------- 124 bool fired() const { 125 return fired_; 126 } 127 128 // --------------------------------------------------------------------------- 129 // During a TimedWait, we need a way to make sure that an auto-reset 130 // WaitableEvent doesn't think that this event has been signaled between 131 // unlocking it and removing it from the wait-list. Called with lock held. 
132 // --------------------------------------------------------------------------- 133 void Disable() { 134 fired_ = true; 135 } 136 137 base::Lock* lock() { 138 return &lock_; 139 } 140 141 base::ConditionVariable* cv() { 142 return &cv_; 143 } 144 145 private: 146 bool fired_; 147 WaitableEvent* signaling_event_; // The WaitableEvent which woke us 148 base::Lock lock_; 149 base::ConditionVariable cv_; 150}; 151 152bool WaitableEvent::Wait() { 153#if defined(ANDROID) 154 // For debugging. See http://b/5244039 155 bool result = TimedWait(TimeDelta::FromSeconds(-1)); 156 if (!result) { 157 LOG(INFO) << "TimedWait() with infinite timeout should never fail!"; 158 } 159 return result; 160#else 161 return TimedWait(TimeDelta::FromSeconds(-1)); 162#endif 163} 164 165bool WaitableEvent::TimedWait(const TimeDelta& max_time) { 166 const Time end_time(Time::Now() + max_time); 167 const bool finite_time = max_time.ToInternalValue() >= 0; 168 169 kernel_->lock_.Acquire(); 170 if (kernel_->signaled_) { 171 if (!kernel_->manual_reset_) { 172 // In this case we were signaled when we had no waiters. Now that 173 // someone has waited upon us, we can automatically reset. 174 kernel_->signaled_ = false; 175 } 176 177 kernel_->lock_.Release(); 178 return true; 179 } 180 181 SyncWaiter sw; 182 sw.lock()->Acquire(); 183 184 Enqueue(&sw); 185 kernel_->lock_.Release(); 186 // We are violating locking order here by holding the SyncWaiter lock but not 187 // the WaitableEvent lock. However, this is safe because we don't lock @lock_ 188 // again before unlocking it. 
189 190 for (;;) { 191 const Time current_time(Time::Now()); 192 193 if (sw.fired() || (finite_time && current_time >= end_time)) { 194 const bool return_value = sw.fired(); 195 196 // We can't acquire @lock_ before releasing the SyncWaiter lock (because 197 // of locking order), however, in between the two a signal could be fired 198 // and @sw would accept it, however we will still return false, so the 199 // signal would be lost on an auto-reset WaitableEvent. Thus we call 200 // Disable which makes sw::Fire return false. 201 sw.Disable(); 202 sw.lock()->Release(); 203 204 kernel_->lock_.Acquire(); 205 kernel_->Dequeue(&sw, &sw); 206 kernel_->lock_.Release(); 207 208 return return_value; 209 } 210 211 if (finite_time) { 212 const TimeDelta max_wait(end_time - current_time); 213 sw.cv()->TimedWait(max_wait); 214 } else { 215 sw.cv()->Wait(); 216 } 217 } 218} 219 220// ----------------------------------------------------------------------------- 221// Synchronous waiting on multiple objects. 222 223static bool // StrictWeakOrdering 224cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a, 225 const std::pair<WaitableEvent*, unsigned> &b) { 226 return a.first < b.first; 227} 228 229// static 230size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, 231 size_t count) { 232 DCHECK(count) << "Cannot wait on no events"; 233 234 // We need to acquire the locks in a globally consistent order. Thus we sort 235 // the array of waitables by address. We actually sort a pairs so that we can 236 // map back to the original index values later. 237 std::vector<std::pair<WaitableEvent*, size_t> > waitables; 238 waitables.reserve(count); 239 for (size_t i = 0; i < count; ++i) 240 waitables.push_back(std::make_pair(raw_waitables[i], i)); 241 242 DCHECK_EQ(count, waitables.size()); 243 244 sort(waitables.begin(), waitables.end(), cmp_fst_addr); 245 246 // The set of waitables must be distinct. 
Since we have just sorted by 247 // address, we can check this cheaply by comparing pairs of consecutive 248 // elements. 249 for (size_t i = 0; i < waitables.size() - 1; ++i) { 250 DCHECK(waitables[i].first != waitables[i+1].first); 251 } 252 253 SyncWaiter sw; 254 255 const size_t r = EnqueueMany(&waitables[0], count, &sw); 256 if (r) { 257 // One of the events is already signaled. The SyncWaiter has not been 258 // enqueued anywhere. EnqueueMany returns the count of remaining waitables 259 // when the signaled one was seen, so the index of the signaled event is 260 // @count - @r. 261 return waitables[count - r].second; 262 } 263 264 // At this point, we hold the locks on all the WaitableEvents and we have 265 // enqueued our waiter in them all. 266 sw.lock()->Acquire(); 267 // Release the WaitableEvent locks in the reverse order 268 for (size_t i = 0; i < count; ++i) { 269 waitables[count - (1 + i)].first->kernel_->lock_.Release(); 270 } 271 272 for (;;) { 273 if (sw.fired()) 274 break; 275 276 sw.cv()->Wait(); 277 } 278 sw.lock()->Release(); 279 280 // The address of the WaitableEvent which fired is stored in the SyncWaiter. 281 WaitableEvent *const signaled_event = sw.signaling_event(); 282 // This will store the index of the raw_waitables which fired. 283 size_t signaled_index = 0; 284 285 // Take the locks of each WaitableEvent in turn (except the signaled one) and 286 // remove our SyncWaiter from the wait-list 287 for (size_t i = 0; i < count; ++i) { 288 if (raw_waitables[i] != signaled_event) { 289 raw_waitables[i]->kernel_->lock_.Acquire(); 290 // There's no possible ABA issue with the address of the SyncWaiter here 291 // because it lives on the stack. Thus the tag value is just the pointer 292 // value again. 
293 raw_waitables[i]->kernel_->Dequeue(&sw, &sw); 294 raw_waitables[i]->kernel_->lock_.Release(); 295 } else { 296 signaled_index = i; 297 } 298 } 299 300 return signaled_index; 301} 302 303// ----------------------------------------------------------------------------- 304// If return value == 0: 305// The locks of the WaitableEvents have been taken in order and the Waiter has 306// been enqueued in the wait-list of each. None of the WaitableEvents are 307// currently signaled 308// else: 309// None of the WaitableEvent locks are held. The Waiter has not been enqueued 310// in any of them and the return value is the index of the first WaitableEvent 311// which was signaled, from the end of the array. 312// ----------------------------------------------------------------------------- 313// static 314size_t WaitableEvent::EnqueueMany 315 (std::pair<WaitableEvent*, size_t>* waitables, 316 size_t count, Waiter* waiter) { 317 if (!count) 318 return 0; 319 320 waitables[0].first->kernel_->lock_.Acquire(); 321 if (waitables[0].first->kernel_->signaled_) { 322 if (!waitables[0].first->kernel_->manual_reset_) 323 waitables[0].first->kernel_->signaled_ = false; 324 waitables[0].first->kernel_->lock_.Release(); 325 return count; 326 } 327 328 const size_t r = EnqueueMany(waitables + 1, count - 1, waiter); 329 if (r) { 330 waitables[0].first->kernel_->lock_.Release(); 331 } else { 332 waitables[0].first->Enqueue(waiter); 333 } 334 335 return r; 336} 337 338// ----------------------------------------------------------------------------- 339 340 341// ----------------------------------------------------------------------------- 342// Private functions... 
343 344WaitableEvent::WaitableEventKernel::WaitableEventKernel(bool manual_reset, 345 bool initially_signaled) 346 : manual_reset_(manual_reset), 347 signaled_(initially_signaled) { 348} 349 350WaitableEvent::WaitableEventKernel::~WaitableEventKernel() { 351} 352 353// ----------------------------------------------------------------------------- 354// Wake all waiting waiters. Called with lock held. 355// ----------------------------------------------------------------------------- 356bool WaitableEvent::SignalAll() { 357 bool signaled_at_least_one = false; 358 359 for (std::list<Waiter*>::iterator 360 i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) { 361 if ((*i)->Fire(this)) 362 signaled_at_least_one = true; 363 } 364 365 kernel_->waiters_.clear(); 366 return signaled_at_least_one; 367} 368 369// --------------------------------------------------------------------------- 370// Try to wake a single waiter. Return true if one was woken. Called with lock 371// held. 372// --------------------------------------------------------------------------- 373bool WaitableEvent::SignalOne() { 374 for (;;) { 375 if (kernel_->waiters_.empty()) 376 return false; 377 378 const bool r = (*kernel_->waiters_.begin())->Fire(this); 379 kernel_->waiters_.pop_front(); 380 if (r) 381 return true; 382 } 383} 384 385// ----------------------------------------------------------------------------- 386// Add a waiter to the list of those waiting. Called with lock held. 387// ----------------------------------------------------------------------------- 388void WaitableEvent::Enqueue(Waiter* waiter) { 389 kernel_->waiters_.push_back(waiter); 390} 391 392// ----------------------------------------------------------------------------- 393// Remove a waiter from the list of those waiting. Return true if the waiter was 394// actually removed. Called with lock held. 
395// ----------------------------------------------------------------------------- 396bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) { 397 for (std::list<Waiter*>::iterator 398 i = waiters_.begin(); i != waiters_.end(); ++i) { 399 if (*i == waiter && (*i)->Compare(tag)) { 400 waiters_.erase(i); 401 return true; 402 } 403 } 404 405 return false; 406} 407 408// ----------------------------------------------------------------------------- 409 410} // namespace base 411