// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>
#include <utility>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/tracked_objects.h"
#include "build/build_config.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

39namespace base {
40
41namespace {
42
43// A lazily created thread local storage for quick access to a thread's message
44// loop, if one exists.  This should be safe and free of static constructors.
45LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
46    LAZY_INSTANCE_INITIALIZER;
47
48// Logical events for Histogram profiling. Run with --message-loop-histogrammer
49// to get an accounting of messages and actions taken on each thread.
50const int kTaskRunEvent = 0x1;
51#if !defined(OS_NACL)
52const int kTimerEvent = 0x2;
53
54// Provide range of message IDs for use in histogramming and debug display.
55const int kLeastNonZeroMessageId = 1;
56const int kMaxMessageId = 1099;
57const int kNumberOfDistinctMessagesDisplayed = 1100;
58
59// Provide a macro that takes an expression (such as a constant, or macro
60// constant) and creates a pair to initialize an array of pairs.  In this case,
61// our pair consists of the expressions value, and the "stringized" version
62// of the expression (i.e., the expression put in quotes).  For example, if
63// we have:
64//    #define FOO 2
65//    #define BAR 5
66// then the following:
67//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
68// will expand to:
69//   {7, "FOO + BAR"}
70// We use the resulting array as an argument to our histogram, which reads the
71// number as a bucket identifier, and proceeds to use the corresponding name
72// in the pair (i.e., the quoted string) when printing out a histogram.
73#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
74
75const LinearHistogram::DescriptionPair event_descriptions_[] = {
76  // Provide some pretty print capability in our histogram for our internal
77  // messages.
78
79  // A few events we handle (kindred to messages), and used to profile actions.
80  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
81  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
82
83  {-1, NULL}  // The list must be null-terminated, per API to histogram.
84};
85#endif  // !defined(OS_NACL)
86
87bool enable_histogrammer_ = false;
88
89MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
90
91#if defined(OS_IOS)
92typedef MessagePumpIOSForIO MessagePumpForIO;
93#elif defined(OS_NACL_SFI)
94typedef MessagePumpDefault MessagePumpForIO;
95#elif defined(OS_POSIX)
96typedef MessagePumpLibevent MessagePumpForIO;
97#endif
98
99#if !defined(OS_NACL_SFI)
100MessagePumpForIO* ToPumpIO(MessagePump* pump) {
101  return static_cast<MessagePumpForIO*>(pump);
102}
103#endif  // !defined(OS_NACL_SFI)
104
105scoped_ptr<MessagePump> ReturnPump(scoped_ptr<MessagePump> pump) {
106  return pump;
107}
108
109}  // namespace
110
111//------------------------------------------------------------------------------
112
113MessageLoop::TaskObserver::TaskObserver() {
114}
115
116MessageLoop::TaskObserver::~TaskObserver() {
117}
118
119MessageLoop::DestructionObserver::~DestructionObserver() {
120}
121
122//------------------------------------------------------------------------------
123
// Public constructor: creates a pump of the requested |type| and binds the
// loop to the calling thread immediately.
MessageLoop::MessageLoop(Type type)
    : MessageLoop(type, MessagePumpFactoryCallback()) {
  BindToCurrentThread();
}

// TYPE_CUSTOM loop driven by a caller-supplied pump.  ReturnPump() adapts the
// pump into the factory-callback form the delegated constructor expects.
MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
  BindToCurrentThread();
}
133
134MessageLoop::~MessageLoop() {
135  // If |pump_| is non-null, this message loop has been bound and should be the
136  // current one on this thread. Otherwise, this loop is being destructed before
137  // it was bound to a thread, so a different message loop (or no loop at all)
138  // may be current.
139  DCHECK((pump_ && current() == this) || (!pump_ && current() != this));
140
141  // iOS just attaches to the loop, it doesn't Run it.
142  // TODO(stuartmorgan): Consider wiring up a Detach().
143#if !defined(OS_IOS)
144  DCHECK(!run_loop_);
145#endif
146
147#if defined(OS_WIN)
148  if (in_high_res_mode_)
149    Time::ActivateHighResolutionTimer(false);
150#endif
151  // Clean up any unprocessed tasks, but take care: deleting a task could
152  // result in the addition of more tasks (e.g., via DeleteSoon).  We set a
153  // limit on the number of times we will allow a deleted task to generate more
154  // tasks.  Normally, we should only pass through this loop once or twice.  If
155  // we end up hitting the loop limit, then it is probably due to one task that
156  // is being stubborn.  Inspect the queues to see who is left.
157  bool did_work;
158  for (int i = 0; i < 100; ++i) {
159    DeletePendingTasks();
160    ReloadWorkQueue();
161    // If we end up with empty queues, then break out of the loop.
162    did_work = DeletePendingTasks();
163    if (!did_work)
164      break;
165  }
166  DCHECK(!did_work);
167
168  // Let interested parties have one last shot at accessing this.
169  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
170                    WillDestroyCurrentMessageLoop());
171
172  thread_task_runner_handle_.reset();
173
174  // Tell the incoming queue that we are dying.
175  incoming_task_queue_->WillDestroyCurrentMessageLoop();
176  incoming_task_queue_ = NULL;
177  unbound_task_runner_ = NULL;
178  task_runner_ = NULL;
179
180  // OK, now make it so that no one can find us.
181  if (current() == this)
182    lazy_tls_ptr.Pointer()->Set(nullptr);
183}
184
// static
// Returns the MessageLoop bound to the calling thread (via TLS), or null if
// no loop has been bound on this thread.
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
// Globally enables/disables per-loop event histogramming (see
// StartHistogrammer()); must be set before loops start running to take effect.
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
// Installs a process-wide factory for TYPE_UI pumps.  First caller wins;
// returns false (and changes nothing) if a factory was already installed.
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}
206
// static
// Creates the platform-appropriate pump for |type|.  The per-platform choice
// is made with local typedefs/macros below; TYPE_CUSTOM never reaches this
// function (its pump comes from the caller-supplied factory).
scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
  #define MESSAGE_PUMP_DEFAULT \
      scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
  #define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    // An externally registered factory takes precedence over the platform
    // default for UI pumps.
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return scoped_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return scoped_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}
250
// Observers may only be registered/unregistered on the loop's own thread;
// they are notified from the destructor (WillDestroyCurrentMessageLoop()).
void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}
262
// The Post* methods below are thin forwarding wrappers; the thread-safe
// queueing logic lives in |task_runner_| (and, beneath it, the incoming task
// queue).
void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  task_runner_->PostTask(from_here, task);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  task_runner_->PostDelayedTask(from_here, task, delay);
}

// Non-nestable tasks do not run while a nested run loop is active; they are
// deferred until control returns to the top-level loop.
void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  task_runner_->PostNonNestableTask(from_here, task);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  task_runner_->PostNonNestableDelayedTask(from_here, task, delay);
}
288
// Runs the loop until quit.  Requires a bound loop (|pump_| is created in
// BindToCurrentThread()).
void MessageLoop::Run() {
  DCHECK(pump_);
  RunLoop run_loop;
  run_loop.Run();
}

// Runs the loop until there is no pending work, then returns.
void MessageLoop::RunUntilIdle() {
  DCHECK(pump_);
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}
300
301void MessageLoop::QuitWhenIdle() {
302  DCHECK_EQ(this, current());
303  if (run_loop_) {
304    run_loop_->quit_when_idle_received_ = true;
305  } else {
306    NOTREACHED() << "Must be inside Run to call Quit";
307  }
308}
309
310void MessageLoop::QuitNow() {
311  DCHECK_EQ(this, current());
312  if (run_loop_) {
313    pump_->Quit();
314  } else {
315    NOTREACHED() << "Must be inside Run to call Quit";
316  }
317}
318
bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

// Helper bound by QuitWhenIdleClosure(); resolves the current loop at run
// time (on the thread that runs the closure) rather than at capture time.
static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}
331
// Toggles whether application tasks may run from a nested run level (see
// DeferOrRunPendingTask()).
void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter a OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

// NOTE: dereferences |run_loop_|; only meaningful while the loop is running.
bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}
348
349void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
350  DCHECK_EQ(this, current());
351  task_observers_.AddObserver(task_observer);
352}
353
354void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
355  DCHECK_EQ(this, current());
356  task_observers_.RemoveObserver(task_observer);
357}
358
359bool MessageLoop::is_running() const {
360  DCHECK_EQ(this, current());
361  return run_loop_ != NULL;
362}
363
bool MessageLoop::HasHighResolutionTasks() {
  return incoming_task_queue_->HasHighResolutionTasks();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}
373
374//------------------------------------------------------------------------------
375
376// static
377scoped_ptr<MessageLoop> MessageLoop::CreateUnbound(
378    Type type, MessagePumpFactoryCallback pump_factory) {
379  return make_scoped_ptr(new MessageLoop(type, pump_factory));
380}
381
382MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
383    : type_(type),
384#if defined(OS_WIN)
385      pending_high_res_tasks_(0),
386      in_high_res_mode_(false),
387#endif
388      nestable_tasks_allowed_(true),
389#if defined(OS_WIN)
390      os_modal_loop_(false),
391#endif  // OS_WIN
392      pump_factory_(pump_factory),
393      message_histogram_(NULL),
394      run_loop_(NULL),
395      incoming_task_queue_(new internal::IncomingTaskQueue(this)),
396      unbound_task_runner_(
397          new internal::MessageLoopTaskRunner(incoming_task_queue_)),
398      task_runner_(unbound_task_runner_) {
399  // If type is TYPE_CUSTOM non-null pump_factory must be given.
400  DCHECK_EQ(type_ == TYPE_CUSTOM, !pump_factory_.is_null());
401}
402
// Completes initialization on the servicing thread: creates the pump,
// publishes this loop in TLS, and starts task scheduling.
void MessageLoop::BindToCurrentThread() {
  DCHECK(!pump_);  // Binding must happen exactly once.
  if (!pump_factory_.is_null())
    pump_ = pump_factory_.Run();
  else
    pump_ = CreateMessagePumpForType(type_);

  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_->StartScheduling();
  unbound_task_runner_->BindToCurrentThread();
  // Once bound, the runner is reachable only through |task_runner_|.
  unbound_task_runner_ = nullptr;
  SetThreadTaskRunnerHandle();
}
418
// Replaces this loop's task runner.  Only supported after binding (the
// unbound runner must already have been consumed by BindToCurrentThread()),
// and the replacement must target this same thread.
void MessageLoop::SetTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  DCHECK_EQ(this, current());
  DCHECK(task_runner->BelongsToCurrentThread());
  DCHECK(!unbound_task_runner_);
  task_runner_ = std::move(task_runner);
  SetThreadTaskRunnerHandle();
}

// (Re)publishes |task_runner_| through ThreadTaskRunnerHandle for this thread.
void MessageLoop::SetThreadTaskRunnerHandle() {
  DCHECK_EQ(this, current());
  // Clear the previous thread task runner first, because only one can exist at
  // a time.
  thread_task_runner_handle_.reset();
  thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
}
435
// Entry point used by RunLoop: starts histogramming (if enabled) and hands
// control to the pump.
void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if defined(OS_WIN)
  // A Windows UI loop given a dispatcher routes native messages through it
  // instead of the plain Run() path.
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}
451
452bool MessageLoop::ProcessNextDelayedNonNestableTask() {
453  if (run_loop_->run_depth_ != 1)
454    return false;
455
456  if (deferred_non_nestable_work_queue_.empty())
457    return false;
458
459  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
460  deferred_non_nestable_work_queue_.pop();
461
462  RunTask(pending_task);
463  return true;
464}
465
// Runs |pending_task| immediately, bracketing it with TaskObserver
// notifications and (optional) histogram/trace accounting.
void MessageLoop::RunTask(const PendingTask& pending_task) {
  DCHECK(nestable_tasks_allowed_);

#if defined(OS_WIN)
  if (pending_task.is_high_res) {
    // Balance the count accumulated in ReloadWorkQueue().
    pending_high_res_tasks_--;
    CHECK_GE(pending_high_res_tasks_, 0);
  }
#endif

  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  HistogramEvent(kTaskRunEvent);

  TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  // The task is done; allow task processing again.
  nestable_tasks_allowed_ = true;
}
491
492bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
493  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
494    RunTask(pending_task);
495    // Show that we ran a task (Note: a new one might arrive as a
496    // consequence!).
497    return true;
498  }
499
500  // We couldn't run the task now because we're in a nested message loop
501  // and the task isn't nestable.
502  deferred_non_nestable_work_queue_.push(pending_task);
503  return false;
504}
505
// Parks a delayed task in |delayed_work_queue_| (a priority queue whose top
// is the next task to become due — see DoDelayedWork()).
void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}
510
// Drains all task queues without running the tasks; returns true if any queue
// held work.  Called repeatedly from the destructor because destroying a task
// can post new ones (e.g. via DeleteSoon).
bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above.  This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior.  See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}
539
// Pulls the cross-thread incoming queue into the local |work_queue_|.
void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty()) {
#if defined(OS_WIN)
    // ReloadWorkQueue() returns the number of incoming high-resolution tasks;
    // track them so DoIdleWork() can toggle the high-resolution timer.
    pending_high_res_tasks_ +=
        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#else
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#endif
  }
}

void MessageLoop::ScheduleWork() {
  pump_->ScheduleWork();
}
558
559//------------------------------------------------------------------------------
560// Method and data for histogramming events and actions taken by each instance
561// on each thread.
562
563void MessageLoop::StartHistogrammer() {
564#if !defined(OS_NACL)  // NaCl build has no metrics code.
565  if (enable_histogrammer_ && !message_histogram_
566      && StatisticsRecorder::IsActive()) {
567    DCHECK(!thread_name_.empty());
568    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
569        "MsgLoop:" + thread_name_,
570        kLeastNonZeroMessageId, kMaxMessageId,
571        kNumberOfDistinctMessagesDisplayed,
572        HistogramBase::kHexRangePrintingFlag,
573        event_descriptions_);
574  }
575#endif
576}
577
578void MessageLoop::HistogramEvent(int event) {
579#if !defined(OS_NACL)
580  if (message_histogram_)
581    message_histogram_->Add(event);
582#endif
583}
584
// MessagePump::Delegate override: runs at most one immediate task.  Returns
// true iff a task was run (delayed tasks encountered along the way are parked
// for DoDelayedWork()).
bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}
615
// MessagePump::Delegate override: runs at most one due delayed task and
// reports (via |next_delayed_work_time|) when the pump should call back.
bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    // Null time tells the pump there is no delayed work scheduled.
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind", there will be a lot of tasks in the delayed work
  // queue that are ready to run.  To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again.  As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      // Still not due; tell the pump when to wake us.
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}
646
// MessagePump::Delegate override: called when the pump runs out of immediate
// and delayed work.  Returns true iff more (deferred) work was done.
bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  // Honor a pending QuitWhenIdle() now that we are actually idle.
  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
  // On Windows we activate the high resolution timer so that the wait
  // _if_ triggered by the timer happens with good resolution. If we don't
  // do this the default resolution is 15ms which might not be acceptable
  // for some tasks.
  bool high_res = pending_high_res_tasks_ > 0;
  if (high_res != in_high_res_mode_) {
    in_high_res_mode_ = high_res;
    Time::ActivateHighResolutionTimer(in_high_res_mode_);
  }
#endif
  return false;
}
668
// Posts a non-nestable task that invokes |deleter| on |object|
// (backs DeleteSoon()).
void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

// Posts a non-nestable task that invokes |releaser| on |object|
// (backs ReleaseSoon()).
void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}
681
682#if !defined(OS_NACL)
683//------------------------------------------------------------------------------
684// MessageLoopForUI
685
#if defined(OS_ANDROID)
// Android: the UI loop is driven by the Java side; Start() hooks this loop
// into that pump rather than calling Run().
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
// iOS: attaches this loop to the application's native run loop instead of
// running it (see the Detach() TODO in ~MessageLoop()).
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif
698
#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
// Forwards file-descriptor watching to the libevent-based UI pump.
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher* controller,
    MessagePumpLibevent::Watcher* delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())
      ->WatchFileDescriptor(fd, persistent, mode, controller, delegate);
}
#endif
714
715#endif  // !defined(OS_NACL)
716
//------------------------------------------------------------------------------
// MessageLoopForIO

MessageLoopForIO::MessageLoopForIO() : MessageLoop(TYPE_IO) {}

#if !defined(OS_NACL_SFI)
// IO observers are forwarded to the platform IO pump (see ToPumpIO()).
void MessageLoopForIO::AddIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
}

void MessageLoopForIO::RemoveIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
}
732
733#if defined(OS_WIN)
// Windows: forward IO-completion-port operations to the IO pump.
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
745#elif defined(OS_POSIX)
746bool MessageLoopForIO::WatchFileDescriptor(int fd,
747                                           bool persistent,
748                                           Mode mode,
749                                           FileDescriptorWatcher* controller,
750                                           Watcher* delegate) {
751  return ToPumpIO(pump_.get())->WatchFileDescriptor(
752      fd,
753      persistent,
754      mode,
755      controller,
756      delegate);
757}
#endif

#endif  // !defined(OS_NACL_SFI)

}  // namespace base