// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ipc/ipc_sync_channel.h"

#include <string>
#include <vector>

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/process_util.h"
#include "base/run_loop.h"
#include "base/strings/string_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_sender.h"
#include "ipc/ipc_sync_message_filter.h"
#include "ipc/ipc_sync_message_unittest.h"
#include "testing/gtest/include/gtest/gtest.h"

using base::WaitableEvent;

namespace IPC {
namespace {

// Base class for a "process" with listener and IPC threads.
class Worker : public Listener, public Sender {
 public:
  // Will create a channel without a name.
  Worker(Channel::Mode mode, const std::string& thread_name)
      : done_(new WaitableEvent(false, false)),
        channel_created_(new WaitableEvent(false, false)),
        mode_(mode),
        ipc_thread_((thread_name + "_ipc").c_str()),
        listener_thread_((thread_name + "_listener").c_str()),
        overrided_thread_(NULL),
        shutdown_event_(true, false),
        is_shutdown_(false) {
  }

  // Will create a named channel and use this name for the threads' names.
  Worker(const std::string& channel_name, Channel::Mode mode)
      : done_(new WaitableEvent(false, false)),
        channel_created_(new WaitableEvent(false, false)),
        channel_name_(channel_name),
        mode_(mode),
        ipc_thread_((channel_name + "_ipc").c_str()),
        listener_thread_((channel_name + "_listener").c_str()),
        overrided_thread_(NULL),
        shutdown_event_(true, false),
        is_shutdown_(false) {
  }

  virtual ~Worker() {
    // Shutdown() must be called before destruction.
    CHECK(is_shutdown_);
  }
  void AddRef() { }
  void Release() { }
  virtual bool Send(Message* msg) OVERRIDE { return channel_->Send(msg); }
  bool SendWithTimeout(Message* msg, int timeout_ms) {
    return channel_->SendWithTimeout(msg, timeout_ms);
  }
  void WaitForChannelCreation() { channel_created_->Wait(); }
  void CloseChannel() {
    DCHECK(base::MessageLoop::current() == ListenerThread()->message_loop());
    channel_->Close();
  }
  void Start() {
    StartThread(&listener_thread_, base::MessageLoop::TYPE_DEFAULT);
    ListenerThread()->message_loop()->PostTask(
        FROM_HERE, base::Bind(&Worker::OnStart, this));
  }
  void Shutdown() {
    // The IPC thread needs to outlive SyncChannel. We can't do this in
    // ~Worker(), since that'll reset the vtable pointer (to Worker's), which
    // may result in race conditions. See http://crbug.com/25841.
    WaitableEvent listener_done(false, false), ipc_done(false, false);
    ListenerThread()->message_loop()->PostTask(
        FROM_HERE, base::Bind(&Worker::OnListenerThreadShutdown1, this,
                              &listener_done, &ipc_done));
    listener_done.Wait();
    ipc_done.Wait();
    ipc_thread_.Stop();
    listener_thread_.Stop();
    is_shutdown_ = true;
  }
  void OverrideThread(base::Thread* overrided_thread) {
    DCHECK(overrided_thread_ == NULL);
    overrided_thread_ = overrided_thread;
  }
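  // The helpers below issue a synchronous request and sanity-check the reply.
  // When |pump| is true, EnableMessagePumping() is set on the message; as used
  // by these tests, that lets the sending thread dispatch incoming messages
  // while it is blocked waiting for the reply.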
  bool SendAnswerToLife(bool pump, int timeout, bool succeed) {
    int answer = 0;
    SyncMessage* msg = new SyncChannelTestMsg_AnswerToLife(&answer);
    if (pump)
      msg->EnableMessagePumping();
    bool result = SendWithTimeout(msg, timeout);
    DCHECK_EQ(result, succeed);
    DCHECK_EQ(answer, (succeed ? 42 : 0));
    return result;
  }
  bool SendDouble(bool pump, bool succeed) {
    int answer = 0;
    SyncMessage* msg = new SyncChannelTestMsg_Double(5, &answer);
    if (pump)
      msg->EnableMessagePumping();
    bool result = Send(msg);
    DCHECK_EQ(result, succeed);
    DCHECK_EQ(answer, (succeed ? 10 : 0));
    return result;
  }
  const std::string& channel_name() { return channel_name_; }
  Channel::Mode mode() { return mode_; }
  WaitableEvent* done_event() { return done_.get(); }
  WaitableEvent* shutdown_event() { return &shutdown_event_; }
  void ResetChannel() { channel_.reset(); }
  // Derived classes need to call this when they've completed their part of
  // the test.
  void Done() { done_->Signal(); }

 protected:
  SyncChannel* channel() { return channel_.get(); }
  // Functions for derived classes to implement if they wish.
  virtual void Run() { }
  virtual void OnAnswer(int* answer) { NOTREACHED(); }
  virtual void OnAnswerDelay(Message* reply_msg) {
    // The message handler map below can only take one entry for
    // SyncChannelTestMsg_AnswerToLife, so since some classes want
    // the normal version while others want the delayed reply, we
    // call the normal version if the derived class didn't override
    // this function.
    int answer;
    OnAnswer(&answer);
    SyncChannelTestMsg_AnswerToLife::WriteReplyParams(reply_msg, answer);
    Send(reply_msg);
  }
  virtual void OnDouble(int in, int* out) { NOTREACHED(); }
  virtual void OnDoubleDelay(int in, Message* reply_msg) {
    int result;
    OnDouble(in, &result);
    SyncChannelTestMsg_Double::WriteReplyParams(reply_msg, result);
    Send(reply_msg);
  }

  virtual void OnNestedTestMsg(Message* reply_msg) {
    NOTREACHED();
  }

  virtual SyncChannel* CreateChannel() {
    return new SyncChannel(channel_name_,
                           mode_,
                           this,
                           ipc_thread_.message_loop_proxy().get(),
                           true,
                           &shutdown_event_);
  }

  base::Thread* ListenerThread() {
    return overrided_thread_ ? overrided_thread_ : &listener_thread_;
  }

  const base::Thread& ipc_thread() const { return ipc_thread_; }

 private:
  // Called on the listener thread to create the sync channel.
  void OnStart() {
    // Link ipc_thread_, listener_thread_ and channel_ together.
    StartThread(&ipc_thread_, base::MessageLoop::TYPE_IO);
    channel_.reset(CreateChannel());
    channel_created_->Signal();
    Run();
  }

  void OnListenerThreadShutdown1(WaitableEvent* listener_event,
                                 WaitableEvent* ipc_event) {
    // SyncChannel needs to be destructed on the thread that it was created on.
    channel_.reset();

    base::RunLoop().RunUntilIdle();

    ipc_thread_.message_loop()->PostTask(
        FROM_HERE, base::Bind(&Worker::OnIPCThreadShutdown, this,
                              listener_event, ipc_event));
  }

  void OnIPCThreadShutdown(WaitableEvent* listener_event,
                           WaitableEvent* ipc_event) {
    base::RunLoop().RunUntilIdle();
    ipc_event->Signal();

    listener_thread_.message_loop()->PostTask(
        FROM_HERE, base::Bind(&Worker::OnListenerThreadShutdown2, this,
                              listener_event));
  }

  void OnListenerThreadShutdown2(WaitableEvent* listener_event) {
    base::RunLoop().RunUntilIdle();
    listener_event->Signal();
  }

  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(Worker, message)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(SyncChannelTestMsg_Double, OnDoubleDelay)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(SyncChannelTestMsg_AnswerToLife,
                                     OnAnswerDelay)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(SyncChannelNestedTestMsg_String,
                                     OnNestedTestMsg)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void StartThread(base::Thread* thread, base::MessageLoop::Type type) {
    base::Thread::Options options;
    options.message_loop_type = type;
    thread->StartWithOptions(options);
  }

  scoped_ptr<WaitableEvent> done_;
  scoped_ptr<WaitableEvent> channel_created_;
  std::string channel_name_;
  Channel::Mode mode_;
  scoped_ptr<SyncChannel> channel_;
  base::Thread ipc_thread_;
  base::Thread listener_thread_;
  base::Thread* overrided_thread_;

  base::WaitableEvent shutdown_event_;

  bool is_shutdown_;

  DISALLOW_COPY_AND_ASSIGN(Worker);
};
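
// Lifecycle of a Worker, as implemented above: Start() spins up the listener
// thread and posts OnStart(), which starts the IPC thread, creates the
// SyncChannel on the listener thread, and calls Run(). Shutdown() then resets
// the channel on the listener thread and drains both threads' message loops
// before stopping them, so no channel-related task outlives its thread.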


// Starts the test with the given workers.  This function deletes the workers
// when it's done.
void RunTest(std::vector<Worker*> workers) {
  // First we create the workers that are channel servers, or else the other
  // workers' channel initialization might fail because the pipe isn't created.
  for (size_t i = 0; i < workers.size(); ++i) {
    if (workers[i]->mode() & Channel::MODE_SERVER_FLAG) {
      workers[i]->Start();
      workers[i]->WaitForChannelCreation();
    }
  }

  // now create the clients
  for (size_t i = 0; i < workers.size(); ++i) {
    if (workers[i]->mode() & Channel::MODE_CLIENT_FLAG)
      workers[i]->Start();
  }

  // wait for all the workers to finish
  for (size_t i = 0; i < workers.size(); ++i)
    workers[i]->done_event()->Wait();

  for (size_t i = 0; i < workers.size(); ++i) {
    workers[i]->Shutdown();
    delete workers[i];
  }
}

class IPCSyncChannelTest : public testing::Test {
 private:
  base::MessageLoop message_loop_;
};
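
// Each test below constructs a set of server/client Workers and hands them to
// RunTest(); both "processes" therefore run inside this single test process on
// their own listener/IPC threads, while the fixture's MessageLoop acts as the
// main thread's loop.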

//------------------------------------------------------------------------------

class SimpleServer : public Worker {
 public:
  explicit SimpleServer(bool pump_during_send)
      : Worker(Channel::MODE_SERVER, "simpler_server"),
        pump_during_send_(pump_during_send) { }
  virtual void Run() OVERRIDE {
    SendAnswerToLife(pump_during_send_, base::kNoTimeout, true);
    Done();
  }

  bool pump_during_send_;
};

class SimpleClient : public Worker {
 public:
  SimpleClient() : Worker(Channel::MODE_CLIENT, "simple_client") { }

  virtual void OnAnswer(int* answer) OVERRIDE {
    *answer = 42;
    Done();
  }
};

void Simple(bool pump_during_send) {
  std::vector<Worker*> workers;
  workers.push_back(new SimpleServer(pump_during_send));
  workers.push_back(new SimpleClient());
  RunTest(workers);
}

// Tests basic synchronous call
TEST_F(IPCSyncChannelTest, Simple) {
  Simple(false);
  Simple(true);
}

//------------------------------------------------------------------------------

// Worker classes which override how the sync channel is created to use the
// two-step initialization process (calling the lightweight constructor and
// then ChannelProxy::Init separately).
class TwoStepServer : public Worker {
 public:
  explicit TwoStepServer(bool create_pipe_now)
      : Worker(Channel::MODE_SERVER, "simpler_server"),
        create_pipe_now_(create_pipe_now) { }

  virtual void Run() OVERRIDE {
    SendAnswerToLife(false, base::kNoTimeout, true);
    Done();
  }

  virtual SyncChannel* CreateChannel() OVERRIDE {
    SyncChannel* channel = new SyncChannel(
        this, ipc_thread().message_loop_proxy().get(), shutdown_event());
    channel->Init(channel_name(), mode(), create_pipe_now_);
    return channel;
  }

  bool create_pipe_now_;
};

class TwoStepClient : public Worker {
 public:
  TwoStepClient(bool create_pipe_now)
      : Worker(Channel::MODE_CLIENT, "simple_client"),
        create_pipe_now_(create_pipe_now) { }

  virtual void OnAnswer(int* answer) OVERRIDE {
    *answer = 42;
    Done();
  }

  virtual SyncChannel* CreateChannel() OVERRIDE {
    SyncChannel* channel = new SyncChannel(
        this, ipc_thread().message_loop_proxy().get(), shutdown_event());
    channel->Init(channel_name(), mode(), create_pipe_now_);
    return channel;
  }

  bool create_pipe_now_;
};

void TwoStep(bool create_server_pipe_now, bool create_client_pipe_now) {
  std::vector<Worker*> workers;
  workers.push_back(new TwoStepServer(create_server_pipe_now));
  workers.push_back(new TwoStepClient(create_client_pipe_now));
  RunTest(workers);
}

// Tests basic two-step initialization, where you call the lightweight
// constructor then Init.
TEST_F(IPCSyncChannelTest, TwoStepInitialization) {
  TwoStep(false, false);
  TwoStep(false, true);
  TwoStep(true, false);
  TwoStep(true, true);
}

//------------------------------------------------------------------------------

class DelayClient : public Worker {
 public:
  DelayClient() : Worker(Channel::MODE_CLIENT, "delay_client") { }

  virtual void OnAnswerDelay(Message* reply_msg) OVERRIDE {
    SyncChannelTestMsg_AnswerToLife::WriteReplyParams(reply_msg, 42);
    Send(reply_msg);
    Done();
  }
};

void DelayReply(bool pump_during_send) {
  std::vector<Worker*> workers;
  workers.push_back(new SimpleServer(pump_during_send));
  workers.push_back(new DelayClient());
  RunTest(workers);
}

// Tests that asynchronous replies work
TEST_F(IPCSyncChannelTest, DelayReply) {
  DelayReply(false);
  DelayReply(true);
}

//------------------------------------------------------------------------------

class NoHangServer : public Worker {
 public:
  NoHangServer(WaitableEvent* got_first_reply, bool pump_during_send)
      : Worker(Channel::MODE_SERVER, "no_hang_server"),
        got_first_reply_(got_first_reply),
        pump_during_send_(pump_during_send) { }
  virtual void Run() OVERRIDE {
    SendAnswerToLife(pump_during_send_, base::kNoTimeout, true);
    got_first_reply_->Signal();

    SendAnswerToLife(pump_during_send_, base::kNoTimeout, false);
    Done();
  }

  WaitableEvent* got_first_reply_;
  bool pump_during_send_;
};

class NoHangClient : public Worker {
 public:
  explicit NoHangClient(WaitableEvent* got_first_reply)
    : Worker(Channel::MODE_CLIENT, "no_hang_client"),
      got_first_reply_(got_first_reply) { }

  virtual void OnAnswerDelay(Message* reply_msg) OVERRIDE {
    // Use the DELAY_REPLY macro so that we can force the reply to be sent
    // before this function returns (when the channel will be reset).
    SyncChannelTestMsg_AnswerToLife::WriteReplyParams(reply_msg, 42);
    Send(reply_msg);
    got_first_reply_->Wait();
    CloseChannel();
    Done();
  }

  WaitableEvent* got_first_reply_;
};

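// In the NoHang scenario, the client answers the server's first sync request,
// waits until the server has observed that reply, and then closes its end of
// the channel; the server's second synchronous send is therefore expected to
// fail (return false) instead of blocking forever.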
void NoHang(bool pump_during_send) {
  WaitableEvent got_first_reply(false, false);
  std::vector<Worker*> workers;
  workers.push_back(new NoHangServer(&got_first_reply, pump_during_send));
  workers.push_back(new NoHangClient(&got_first_reply));
  RunTest(workers);
}

// Tests that caller doesn't hang if receiver dies
TEST_F(IPCSyncChannelTest, NoHang) {
  NoHang(false);
  NoHang(true);
}

//------------------------------------------------------------------------------

class UnblockServer : public Worker {
 public:
  UnblockServer(bool pump_during_send, bool delete_during_send)
    : Worker(Channel::MODE_SERVER, "unblock_server"),
      pump_during_send_(pump_during_send),
      delete_during_send_(delete_during_send) { }
  virtual void Run() OVERRIDE {
    if (delete_during_send_) {
      // Use custom code since race conditions mean the answer may or may not be
      // available.
      int answer = 0;
      SyncMessage* msg = new SyncChannelTestMsg_AnswerToLife(&answer);
      if (pump_during_send_)
        msg->EnableMessagePumping();
      Send(msg);
    } else {
      SendAnswerToLife(pump_during_send_, base::kNoTimeout, true);
    }
    Done();
  }

  virtual void OnDoubleDelay(int in, Message* reply_msg) OVERRIDE {
    SyncChannelTestMsg_Double::WriteReplyParams(reply_msg, in * 2);
    Send(reply_msg);
    if (delete_during_send_)
      ResetChannel();
  }

  bool pump_during_send_;
  bool delete_during_send_;
};

class UnblockClient : public Worker {
 public:
  explicit UnblockClient(bool pump_during_send)
    : Worker(Channel::MODE_CLIENT, "unblock_client"),
      pump_during_send_(pump_during_send) { }

  virtual void OnAnswer(int* answer) OVERRIDE {
    SendDouble(pump_during_send_, true);
    *answer = 42;
    Done();
  }

  bool pump_during_send_;
};

void Unblock(bool server_pump, bool client_pump, bool delete_during_send) {
  std::vector<Worker*> workers;
  workers.push_back(new UnblockServer(server_pump, delete_during_send));
  workers.push_back(new UnblockClient(client_pump));
  RunTest(workers);
}

// Tests that the caller unblocks to answer a sync message from the receiver.
TEST_F(IPCSyncChannelTest, Unblock) {
  Unblock(false, false, false);
  Unblock(false, true, false);
  Unblock(true, false, false);
  Unblock(true, true, false);
}

//------------------------------------------------------------------------------

// Tests that the SyncChannel object can be deleted during a Send.
TEST_F(IPCSyncChannelTest, ChannelDeleteDuringSend) {
  Unblock(false, false, true);
  Unblock(false, true, true);
  Unblock(true, false, true);
  Unblock(true, true, true);
}

//------------------------------------------------------------------------------

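// In the Recursive tests the sync calls nest: the server sends Double to the
// client, the client sends Double back while handling it, and the server then
// sends AnswerToLife while handling that, before any reply is delivered. The
// RecursiveNoHang variant has the client close the channel at the deepest
// point, so the still-pending Send() calls are expected to fail rather than
// hang.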
531ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liaoclass RecursiveServer : public Worker {
532ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao public:
533ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  RecursiveServer(bool expected_send_result, bool pump_first, bool pump_second)
534ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      : Worker(Channel::MODE_SERVER, "recursive_server"),
535ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao        expected_send_result_(expected_send_result),
536ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao        pump_first_(pump_first), pump_second_(pump_second) {}
537ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  virtual void Run() OVERRIDE {
538ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    SendDouble(pump_first_, expected_send_result_);
539ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    Done();
540ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  }
541ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
542ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  virtual void OnDouble(int in, int* out) OVERRIDE {
543ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    *out = in * 2;
544ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    SendAnswerToLife(pump_second_, base::kNoTimeout, expected_send_result_);
545ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  }
546ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
547ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  bool expected_send_result_, pump_first_, pump_second_;
548ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao};
549ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
550ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liaoclass RecursiveClient : public Worker {
551ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao public:
552ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  RecursiveClient(bool pump_during_send, bool close_channel)
553ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      : Worker(Channel::MODE_CLIENT, "recursive_client"),
554ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao        pump_during_send_(pump_during_send), close_channel_(close_channel) {}
555ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
556ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  virtual void OnDoubleDelay(int in, Message* reply_msg) OVERRIDE {
557ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    SendDouble(pump_during_send_, !close_channel_);
558ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    if (close_channel_) {
559ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      delete reply_msg;
560ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    } else {
561ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      SyncChannelTestMsg_Double::WriteReplyParams(reply_msg, in * 2);
562ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      Send(reply_msg);
563ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    }
564ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    Done();
565ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  }
566ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
567ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  virtual void OnAnswerDelay(Message* reply_msg) OVERRIDE {
568ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    if (close_channel_) {
569ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      delete reply_msg;
570ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      CloseChannel();
571ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    } else {
572ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      SyncChannelTestMsg_AnswerToLife::WriteReplyParams(reply_msg, 42);
573ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      Send(reply_msg);
574ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    }
575ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  }
576ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
577ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  bool pump_during_send_, close_channel_;
578ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao};
579ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
580ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liaovoid Recursive(
581ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    bool server_pump_first, bool server_pump_second, bool client_pump) {
582ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  std::vector<Worker*> workers;
583ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  workers.push_back(
584ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      new RecursiveServer(true, server_pump_first, server_pump_second));
585ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  workers.push_back(new RecursiveClient(client_pump, false));
586ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  RunTest(workers);
587ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao}
588ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
589ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao// Tests a server calling Send while another Send is pending.
590ea285162342df160e7860e26528bc7110bc6c0cdShih-wei LiaoTEST_F(IPCSyncChannelTest, Recursive) {
591ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(false, false, false);
592ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(false, false, true);
593ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(false, true, false);
594ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(false, true, true);
595ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(true, false, false);
596ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(true, false, true);
597ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(true, true, false);
598ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  Recursive(true, true, true);
599ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao}
600ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
601ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao//------------------------------------------------------------------------------
602ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
603ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liaovoid RecursiveNoHang(
604ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao    bool server_pump_first, bool server_pump_second, bool client_pump) {
605ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  std::vector<Worker*> workers;
606ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  workers.push_back(
607ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao      new RecursiveServer(false, server_pump_first, server_pump_second));
608ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  workers.push_back(new RecursiveClient(client_pump, true));
609ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao  RunTest(workers);
610ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao}
611ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao
612ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao// Tests that if a caller makes a sync call during an existing sync call and
613ea285162342df160e7860e26528bc7110bc6c0cdShih-wei Liao// the receiver dies, neither of the Send() calls hang.
TEST_F(IPCSyncChannelTest, RecursiveNoHang) {
  RecursiveNoHang(false, false, false);
  RecursiveNoHang(false, false, true);
  RecursiveNoHang(false, true, false);
  RecursiveNoHang(false, true, true);
  RecursiveNoHang(true, false, false);
  RecursiveNoHang(true, false, true);
  RecursiveNoHang(true, true, false);
  RecursiveNoHang(true, true, true);
}

//------------------------------------------------------------------------------

class MultipleServer1 : public Worker {
 public:
  explicit MultipleServer1(bool pump_during_send)
    : Worker("test_channel1", Channel::MODE_SERVER),
      pump_during_send_(pump_during_send) { }

  virtual void Run() OVERRIDE {
    SendDouble(pump_during_send_, true);
    Done();
  }

  bool pump_during_send_;
};

class MultipleClient1 : public Worker {
 public:
  MultipleClient1(WaitableEvent* client1_msg_received,
                  WaitableEvent* client1_can_reply) :
      Worker("test_channel1", Channel::MODE_CLIENT),
      client1_msg_received_(client1_msg_received),
      client1_can_reply_(client1_can_reply) { }

  virtual void OnDouble(int in, int* out) OVERRIDE {
    client1_msg_received_->Signal();
    *out = in * 2;
    client1_can_reply_->Wait();
    Done();
  }

 private:
  WaitableEvent *client1_msg_received_, *client1_can_reply_;
};

class MultipleServer2 : public Worker {
 public:
  MultipleServer2() : Worker("test_channel2", Channel::MODE_SERVER) { }

  virtual void OnAnswer(int* result) OVERRIDE {
    *result = 42;
    Done();
  }
};

class MultipleClient2 : public Worker {
 public:
  MultipleClient2(
    WaitableEvent* client1_msg_received, WaitableEvent* client1_can_reply,
    bool pump_during_send)
    : Worker("test_channel2", Channel::MODE_CLIENT),
      client1_msg_received_(client1_msg_received),
      client1_can_reply_(client1_can_reply),
      pump_during_send_(pump_during_send) { }

  virtual void Run() OVERRIDE {
    client1_msg_received_->Wait();
    SendAnswerToLife(pump_during_send_, base::kNoTimeout, true);
    client1_can_reply_->Signal();
    Done();
  }

 private:
  WaitableEvent *client1_msg_received_, *client1_can_reply_;
  bool pump_during_send_;
};

void Multiple(bool server_pump, bool client_pump) {
  std::vector<Worker*> workers;

  // A shared worker thread so that server1 and server2 run on one thread.
  base::Thread worker_thread("Multiple");
  ASSERT_TRUE(worker_thread.Start());

  // Server1 sends a sync msg to client1, which blocks the reply until
  // server2 (which runs on the same worker thread as server1) responds
  // to a sync msg from client2.
  WaitableEvent client1_msg_received(false, false);
  WaitableEvent client1_can_reply(false, false);

  Worker* worker;

  worker = new MultipleServer2();
  worker->OverrideThread(&worker_thread);
  workers.push_back(worker);

  worker = new MultipleClient2(
      &client1_msg_received, &client1_can_reply, client_pump);
  workers.push_back(worker);

  worker = new MultipleServer1(server_pump);
  worker->OverrideThread(&worker_thread);
  workers.push_back(worker);

  worker = new MultipleClient1(
      &client1_msg_received, &client1_can_reply);
  workers.push_back(worker);

  RunTest(workers);
}

// Tests that multiple SyncObjects on the same listener thread can unblock each
// other.
TEST_F(IPCSyncChannelTest, Multiple) {
  Multiple(false, false);
  Multiple(false, true);
  Multiple(true, false);
  Multiple(true, true);
}

//------------------------------------------------------------------------------

// This class provides server side functionality to test the case where
// multiple sync channels are in use on the same thread on the client and
// nested calls are issued.
class QueuedReplyServer : public Worker {
 public:
  QueuedReplyServer(base::Thread* listener_thread,
                    const std::string& channel_name,
                    const std::string& reply_text)
      : Worker(channel_name, Channel::MODE_SERVER),
        reply_text_(reply_text) {
    Worker::OverrideThread(listener_thread);
  }

  virtual void OnNestedTestMsg(Message* reply_msg) OVERRIDE {
    VLOG(1) << __FUNCTION__ << " Sending reply: " << reply_text_;
    SyncChannelNestedTestMsg_String::WriteReplyParams(reply_msg, reply_text_);
    Send(reply_msg);
    Done();
  }

 private:
  std::string reply_text_;
};

// The QueuedReplyClient class provides functionality to test the case where
// multiple sync channels are in use on the same thread and they make nested
// sync calls, i.e. while the first channel waits for a response it makes a
// sync call on another channel.
// The callstack should unwind correctly, i.e. the outermost call should
// complete first, and so on.
class QueuedReplyClient : public Worker {
 public:
  QueuedReplyClient(base::Thread* listener_thread,
                    const std::string& channel_name,
                    const std::string& expected_text,
                    bool pump_during_send)
      : Worker(channel_name, Channel::MODE_CLIENT),
        pump_during_send_(pump_during_send),
        expected_text_(expected_text) {
    Worker::OverrideThread(listener_thread);
  }

  virtual void Run() OVERRIDE {
    std::string response;
    SyncMessage* msg = new SyncChannelNestedTestMsg_String(&response);
    if (pump_during_send_)
      msg->EnableMessagePumping();
    bool result = Send(msg);
    DCHECK(result);
    DCHECK_EQ(response, expected_text_);

    VLOG(1) << __FUNCTION__ << " Received reply: " << response;
    Done();
  }

 private:
  bool pump_during_send_;
  std::string expected_text_;
};

void QueuedReply(bool client_pump) {
  std::vector<Worker*> workers;

  // A shared worker thread for servers
  base::Thread server_worker_thread("QueuedReply_ServerListener");
  ASSERT_TRUE(server_worker_thread.Start());

  base::Thread client_worker_thread("QueuedReply_ClientListener");
  ASSERT_TRUE(client_worker_thread.Start());

  Worker* worker;

  worker = new QueuedReplyServer(&server_worker_thread,
                                 "QueuedReply_Server1",
                                 "Got first message");
  workers.push_back(worker);

  worker = new QueuedReplyServer(&server_worker_thread,
                                 "QueuedReply_Server2",
                                 "Got second message");
  workers.push_back(worker);

  worker = new QueuedReplyClient(&client_worker_thread,
                                 "QueuedReply_Server1",
                                 "Got first message",
                                 client_pump);
  workers.push_back(worker);

  worker = new QueuedReplyClient(&client_worker_thread,
                                 "QueuedReply_Server2",
                                 "Got second message",
                                 client_pump);
  workers.push_back(worker);

  RunTest(workers);
}

// While a blocking send is in progress, the listener thread might answer other
// synchronous messages.  This tests that if, during the response to another
// message, the reply to the original message comes in, it is queued up
// correctly and the original Send is unblocked later.
// We also test that the send call stacks unwind correctly when the channel
// pumps messages while waiting for a response.
TEST_F(IPCSyncChannelTest, QueuedReply) {
  QueuedReply(false);
  QueuedReply(true);
}

//------------------------------------------------------------------------------

class ChattyClient : public Worker {
 public:
  ChattyClient() :
      Worker(Channel::MODE_CLIENT, "chatty_client") { }

  virtual void OnAnswer(int* answer) OVERRIDE {
    // The PostMessage limit is 10k.  Send 20% more than that.
    const int kMessageLimit = 10000;
    const int kMessagesToSend = kMessageLimit * 120 / 100;
    for (int i = 0; i < kMessagesToSend; ++i) {
      if (!SendDouble(false, true))
        break;
    }
    *answer = 42;
    Done();
  }
};

void ChattyServer(bool pump_during_send) {
  std::vector<Worker*> workers;
  workers.push_back(new UnblockServer(pump_during_send, false));
  workers.push_back(new ChattyClient());
  RunTest(workers);
}

// Tests http://b/1093251 - that sending lots of sync messages while
// the receiver is waiting for a sync reply does not overflow the PostMessage
// queue.
TEST_F(IPCSyncChannelTest, ChattyServer) {
  ChattyServer(false);
  ChattyServer(true);
}

//------------------------------------------------------------------------------

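// A server that issues one timed SendAnswerToLife() call per entry in
// |timeout_seq|, using |timeout_ms| as the timeout.  Entries set to true mark
// the sends for which the client will deliberately not reply (see
// UnresponsiveClient below), so those sends are expected to time out.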
class TimeoutServer : public Worker {
 public:
  TimeoutServer(int timeout_ms,
                std::vector<bool> timeout_seq,
                bool pump_during_send)
      : Worker(Channel::MODE_SERVER, "timeout_server"),
        timeout_ms_(timeout_ms),
        timeout_seq_(timeout_seq),
        pump_during_send_(pump_during_send) {
  }

  virtual void Run() OVERRIDE {
    for (std::vector<bool>::const_iterator iter = timeout_seq_.begin();
         iter != timeout_seq_.end(); ++iter) {
      SendAnswerToLife(pump_during_send_, timeout_ms_, !*iter);
    }
    Done();
  }

 private:
  int timeout_ms_;
  std::vector<bool> timeout_seq_;
  bool pump_during_send_;
};

class UnresponsiveClient : public Worker {
 public:
  explicit UnresponsiveClient(std::vector<bool> timeout_seq)
      : Worker(Channel::MODE_CLIENT, "unresponsive_client"),
        timeout_seq_(timeout_seq) {
  }

  virtual void OnAnswerDelay(Message* reply_msg) OVERRIDE {
    DCHECK(!timeout_seq_.empty());
    if (!timeout_seq_[0]) {
      SyncChannelTestMsg_AnswerToLife::WriteReplyParams(reply_msg, 42);
      Send(reply_msg);
    } else {
      // Don't reply.
      delete reply_msg;
    }
    timeout_seq_.erase(timeout_seq_.begin());
    if (timeout_seq_.empty())
      Done();
  }

 private:
  // Whether we should time-out or respond to the various messages we receive.
  std::vector<bool> timeout_seq_;
};

void SendWithTimeoutOK(bool pump_during_send) {
  std::vector<Worker*> workers;
  std::vector<bool> timeout_seq;
  timeout_seq.push_back(false);
  timeout_seq.push_back(false);
  timeout_seq.push_back(false);
  workers.push_back(new TimeoutServer(5000, timeout_seq, pump_during_send));
  workers.push_back(new SimpleClient());
  RunTest(workers);
}

void SendWithTimeoutTimeout(bool pump_during_send) {
  std::vector<Worker*> workers;
  std::vector<bool> timeout_seq;
  timeout_seq.push_back(true);
  timeout_seq.push_back(false);
  timeout_seq.push_back(false);
  workers.push_back(new TimeoutServer(100, timeout_seq, pump_during_send));
  workers.push_back(new UnresponsiveClient(timeout_seq));
  RunTest(workers);
}

void SendWithTimeoutMixedOKAndTimeout(bool pump_during_send) {
  std::vector<Worker*> workers;
  std::vector<bool> timeout_seq;
  timeout_seq.push_back(true);
  timeout_seq.push_back(false);
  timeout_seq.push_back(false);
  timeout_seq.push_back(true);
  timeout_seq.push_back(false);
  workers.push_back(new TimeoutServer(100, timeout_seq, pump_during_send));
  workers.push_back(new UnresponsiveClient(timeout_seq));
  RunTest(workers);
}

// Tests that SendWithTimeout does not time-out if the response comes back fast
// enough.
TEST_F(IPCSyncChannelTest, SendWithTimeoutOK) {
  SendWithTimeoutOK(false);
  SendWithTimeoutOK(true);
}

// Tests that SendWithTimeout does time-out.
TEST_F(IPCSyncChannelTest, SendWithTimeoutTimeout) {
  SendWithTimeoutTimeout(false);
  SendWithTimeoutTimeout(true);
}

// Sends some messages that time out and some that succeed.
TEST_F(IPCSyncChannelTest, SendWithTimeoutMixedOKAndTimeout) {
  SendWithTimeoutMixedOKAndTimeout(false);
  SendWithTimeoutMixedOKAndTimeout(true);
}

//------------------------------------------------------------------------------

void NestedCallback(Worker* server) {
  // Sleep a bit so that we wake up after the reply has been received.
  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(250));
  server->SendAnswerToLife(true, base::kNoTimeout, true);
}

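// Set by the delayed TimeoutCallback task below; DoneEventRaceServer uses it
// to detect whether its timed Send() actually timed out.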
bool timeout_occurred = false;

void TimeoutCallback() {
  timeout_occurred = true;
}

class DoneEventRaceServer : public Worker {
 public:
  DoneEventRaceServer()
      : Worker(Channel::MODE_SERVER, "done_event_race_server") { }

  virtual void Run() OVERRIDE {
    base::MessageLoop::current()->PostTask(FROM_HERE,
                                           base::Bind(&NestedCallback, this));
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&TimeoutCallback),
        base::TimeDelta::FromSeconds(9));
    // Even though we have a timeout on the Send, it will succeed because, for
    // this bug, the reply message comes back and is deserialized; it is only
    // the done event that isn't set.  So we indirectly use the timeout task to
    // notice if a timeout occurred.
    SendAnswerToLife(true, 10000, true);
    DCHECK(!timeout_occurred);
    Done();
  }
};

// Tests http://b/1474092 - that if after the done_event is set but before
// OnObjectSignaled is called another message is sent out, then after its
// reply comes back OnObjectSignaled will be called for the first message.
TEST_F(IPCSyncChannelTest, DoneEventRace) {
  std::vector<Worker*> workers;
  workers.push_back(new DoneEventRaceServer());
  workers.push_back(new SimpleClient());
  RunTest(workers);
}

//------------------------------------------------------------------------------

class TestSyncMessageFilter : public SyncMessageFilter {
 public:
  TestSyncMessageFilter(base::WaitableEvent* shutdown_event,
                        Worker* worker,
                        scoped_refptr<base::MessageLoopProxy> message_loop)
      : SyncMessageFilter(shutdown_event),
        worker_(worker),
        message_loop_(message_loop) {
  }

  virtual void OnFilterAdded(Channel* channel) OVERRIDE {
    SyncMessageFilter::OnFilterAdded(channel);
    message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&TestSyncMessageFilter::SendMessageOnHelperThread, this));
  }

  void SendMessageOnHelperThread() {
    int answer = 0;
    bool result = Send(new SyncChannelTestMsg_AnswerToLife(&answer));
    DCHECK(result);
    DCHECK_EQ(answer, 42);

    worker_->Done();
  }

 private:
  virtual ~TestSyncMessageFilter() {}

  Worker* worker_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
};

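// A server that attaches a TestSyncMessageFilter to its channel; the filter
// then sends a synchronous message from the helper thread and signals Done().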
class SyncMessageFilterServer : public Worker {
 public:
  SyncMessageFilterServer()
      : Worker(Channel::MODE_SERVER, "sync_message_filter_server"),
        thread_("helper_thread") {
    base::Thread::Options options;
    options.message_loop_type = base::MessageLoop::TYPE_DEFAULT;
    thread_.StartWithOptions(options);
    filter_ = new TestSyncMessageFilter(shutdown_event(), this,
                                        thread_.message_loop_proxy());
  }

  virtual void Run() OVERRIDE {
    channel()->AddFilter(filter_.get());
  }

  base::Thread thread_;
  scoped_refptr<TestSyncMessageFilter> filter_;
};

// This class provides functionality to test the case that a Send on the sync
// channel does not crash after the channel has been closed.
class ServerSendAfterClose : public Worker {
 public:
  ServerSendAfterClose()
     : Worker(Channel::MODE_SERVER, "simpler_server"),
       send_result_(true) {
  }

  bool SendDummy() {
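    // Post the Send() of an async message to the listener thread; the result
    // is captured by the Send() override below.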
    ListenerThread()->message_loop()->PostTask(
        FROM_HERE, base::Bind(base::IgnoreResult(&ServerSendAfterClose::Send),
                              this, new SyncChannelTestMsg_NoArgs));
    return true;
  }

  bool send_result() const {
    return send_result_;
  }

 private:
  virtual void Run() OVERRIDE {
    CloseChannel();
    Done();
  }

  virtual bool Send(Message* msg) OVERRIDE {
    send_result_ = Worker::Send(msg);
    Done();
    return send_result_;
  }

  bool send_result_;
};

// Tests that SyncMessageFilter can send a sync message from a helper thread.
TEST_F(IPCSyncChannelTest, SyncMessageFilter) {
  std::vector<Worker*> workers;
  workers.push_back(new SyncMessageFilterServer());
  workers.push_back(new SimpleClient());
  RunTest(workers);
}

// Test the case when the channel is closed and a Send is attempted after that.
TEST_F(IPCSyncChannelTest, SendAfterClose) {
  ServerSendAfterClose server;
  server.Start();

  server.done_event()->Wait();
  server.done_event()->Reset();

  server.SendDummy();
  server.done_event()->Wait();

  EXPECT_FALSE(server.send_result());

  server.Shutdown();
}

//------------------------------------------------------------------------------

class RestrictedDispatchServer : public Worker {
 public:
  RestrictedDispatchServer(WaitableEvent* sent_ping_event,
                           WaitableEvent* wait_event)
      : Worker("restricted_channel", Channel::MODE_SERVER),
        sent_ping_event_(sent_ping_event),
        wait_event_(wait_event) { }

  void OnDoPing(int ping) {
    // Send an asynchronous message that unblocks the caller.
    Message* msg = new SyncChannelTestMsg_Ping(ping);
    msg->set_unblock(true);
    Send(msg);
    // Signal the event after the message has been sent on the channel, on the
    // IPC thread.
    ipc_thread().message_loop()->PostTask(
        FROM_HERE, base::Bind(&RestrictedDispatchServer::OnPingSent, this));
  }

  void OnPingTTL(int ping, int* out) {
    *out = ping;
    wait_event_->Wait();
  }

  base::Thread* ListenerThread() { return Worker::ListenerThread(); }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(RestrictedDispatchServer, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_NoArgs, OnNoArgs)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_PingTTL, OnPingTTL)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Done, Done)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnPingSent() {
    sent_ping_event_->Signal();
  }

  void OnNoArgs() { }
  WaitableEvent* sent_ping_event_;
  WaitableEvent* wait_event_;
};

class NonRestrictedDispatchServer : public Worker {
 public:
  NonRestrictedDispatchServer(WaitableEvent* signal_event)
      : Worker("non_restricted_channel", Channel::MODE_SERVER),
        signal_event_(signal_event) {}

  base::Thread* ListenerThread() { return Worker::ListenerThread(); }

  void OnDoPingTTL(int ping) {
    int value = 0;
    Send(new SyncChannelTestMsg_PingTTL(ping, &value));
    signal_event_->Signal();
  }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(NonRestrictedDispatchServer, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_NoArgs, OnNoArgs)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Done, Done)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnNoArgs() { }
  WaitableEvent* signal_event_;
};

class RestrictedDispatchClient : public Worker {
 public:
  RestrictedDispatchClient(WaitableEvent* sent_ping_event,
                           RestrictedDispatchServer* server,
                           NonRestrictedDispatchServer* server2,
                           int* success)
      : Worker("restricted_channel", Channel::MODE_CLIENT),
        ping_(0),
        server_(server),
        server2_(server2),
        success_(success),
        sent_ping_event_(sent_ping_event) {}

  virtual void Run() OVERRIDE {
    // Incoming messages from our channel should only be dispatched when we
    // send a message on that same channel.
    channel()->SetRestrictDispatchChannelGroup(1);

    server_->ListenerThread()->message_loop()->PostTask(
        FROM_HERE, base::Bind(&RestrictedDispatchServer::OnDoPing, server_, 1));
    sent_ping_event_->Wait();
    Send(new SyncChannelTestMsg_NoArgs);
    if (ping_ == 1)
      ++*success_;
    else
      LOG(ERROR) << "Send failed to dispatch incoming message on same channel";

    non_restricted_channel_.reset(
        new SyncChannel("non_restricted_channel",
                        Channel::MODE_CLIENT,
                        this,
                        ipc_thread().message_loop_proxy().get(),
                        true,
                        shutdown_event()));

    server_->ListenerThread()->message_loop()->PostTask(
        FROM_HERE, base::Bind(&RestrictedDispatchServer::OnDoPing, server_, 2));
    sent_ping_event_->Wait();
    // Check that the incoming message is *not* dispatched when sending on the
    // non restricted channel.
    // TODO(piman): there is a possibility of a false positive race condition
    // here, if the message that was posted on the server-side end of the pipe
    // is not visible yet on the client side, but I don't know how to solve this
    // without hooking into the internals of SyncChannel. I haven't seen it in
    // practice (i.e. not setting SetRestrictDispatchToSameChannel does cause
    // the following to fail).
    non_restricted_channel_->Send(new SyncChannelTestMsg_NoArgs);
    if (ping_ == 1)
      ++*success_;
    else
      LOG(ERROR) << "Send dispatched message from restricted channel";

    Send(new SyncChannelTestMsg_NoArgs);
    if (ping_ == 2)
      ++*success_;
    else
      LOG(ERROR) << "Send failed to dispatch incoming message on same channel";

    // Check that the incoming message on the non-restricted channel is
    // dispatched when sending on the restricted channel.
    server2_->ListenerThread()->message_loop()->PostTask(
        FROM_HERE,
        base::Bind(&NonRestrictedDispatchServer::OnDoPingTTL, server2_, 3));
    int value = 0;
    Send(new SyncChannelTestMsg_PingTTL(4, &value));
    if (ping_ == 3 && value == 4)
      ++*success_;
    else
      LOG(ERROR) << "Send failed to dispatch message from unrestricted channel";

    non_restricted_channel_->Send(new SyncChannelTestMsg_Done);
    non_restricted_channel_.reset();
    Send(new SyncChannelTestMsg_Done);
    Done();
  }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(RestrictedDispatchClient, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Ping, OnPing)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(SyncChannelTestMsg_PingTTL, OnPingTTL)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnPing(int ping) {
    ping_ = ping;
  }

  void OnPingTTL(int ping, IPC::Message* reply) {
    ping_ = ping;
    // This message comes from the NonRestrictedDispatchServer, we have to send
    // the reply back manually.
    SyncChannelTestMsg_PingTTL::WriteReplyParams(reply, ping);
    non_restricted_channel_->Send(reply);
  }

  int ping_;
  RestrictedDispatchServer* server_;
  NonRestrictedDispatchServer* server2_;
  int* success_;
  WaitableEvent* sent_ping_event_;
  scoped_ptr<SyncChannel> non_restricted_channel_;
};

TEST_F(IPCSyncChannelTest, RestrictedDispatch) {
  WaitableEvent sent_ping_event(false, false);
  WaitableEvent wait_event(false, false);
  RestrictedDispatchServer* server =
      new RestrictedDispatchServer(&sent_ping_event, &wait_event);
  NonRestrictedDispatchServer* server2 =
      new NonRestrictedDispatchServer(&wait_event);

  int success = 0;
  std::vector<Worker*> workers;
  workers.push_back(server);
  workers.push_back(server2);
  workers.push_back(new RestrictedDispatchClient(
      &sent_ping_event, server, server2, &success));
  RunTest(workers);
  EXPECT_EQ(4, success);
}

//------------------------------------------------------------------------------

// This test case inspired by crbug.com/108491
// We create two servers that use the same ListenerThread but have
// SetRestrictDispatchToSameChannel set to true.
// We create clients, then use some specific WaitableEvent wait/signalling to
// ensure that messages get dispatched in a way that causes a deadlock due to
// a nested dispatch and an eligible message in a higher-level dispatch's
// delayed_queue. Specifically, we start with client1 about to send an
// unblocking message to server1, while the shared listener thread for the
// servers server1 and server2 is about to send a non-unblocking message to
// client1. At the same time, client2 will be about to send an unblocking
// message to server2. Server1 will handle the client1->server1 message by
// telling server2 to send a non-unblocking message to client2.
// What should happen is that the send to server2 should find the pending,
// same-context client2->server2 message to dispatch, causing client2 to
// unblock then handle the server2->client2 message, so that the shared
// servers' listener thread can then respond to the client1->server1 message.
// Then client1 can handle the non-unblocking server1->client1 message.
// The old code would end up in a state where the server2->client2 message is
// sent, but the client2->server2 message (which is eligible for dispatch, and
// which is what client2 is waiting for) is stashed in a local delayed_queue
// that has server1's channel context, causing a deadlock.
// WaitableEvents in the events array are used to:
//   event 0: indicate to client1 that server listener is in OnDoServerTask
//   event 1: indicate to client1 that client2 listener is in OnDoClient2Task
//   event 2: indicate to server1 that client2 listener is in OnDoClient2Task
//   event 3: indicate to client2 that server listener is in OnDoServerTask

class RestrictedDispatchDeadlockServer : public Worker {
 public:
  RestrictedDispatchDeadlockServer(int server_num,
                                   WaitableEvent* server_ready_event,
                                   WaitableEvent** events,
                                   RestrictedDispatchDeadlockServer* peer)
      : Worker(server_num == 1 ? "channel1" : "channel2", Channel::MODE_SERVER),
        server_num_(server_num),
        server_ready_event_(server_ready_event),
        events_(events),
        peer_(peer) { }

  void OnDoServerTask() {
    events_[3]->Signal();
    events_[2]->Wait();
    events_[0]->Signal();
    SendMessageToClient();
  }

  virtual void Run() OVERRIDE {
    channel()->SetRestrictDispatchChannelGroup(1);
    server_ready_event_->Signal();
  }

  base::Thread* ListenerThread() { return Worker::ListenerThread(); }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(RestrictedDispatchDeadlockServer, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_NoArgs, OnNoArgs)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Done, Done)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnNoArgs() {
    if (server_num_ == 1) {
      DCHECK(peer_ != NULL);
      peer_->SendMessageToClient();
    }
  }

  void SendMessageToClient() {
    Message* msg = new SyncChannelTestMsg_NoArgs;
    msg->set_unblock(false);
    DCHECK(!msg->should_unblock());
    Send(msg);
  }

  int server_num_;
  WaitableEvent* server_ready_event_;
  WaitableEvent** events_;
  RestrictedDispatchDeadlockServer* peer_;
};

class RestrictedDispatchDeadlockClient2 : public Worker {
 public:
  RestrictedDispatchDeadlockClient2(RestrictedDispatchDeadlockServer* server,
                                    WaitableEvent* server_ready_event,
                                    WaitableEvent** events)
      : Worker("channel2", Channel::MODE_CLIENT),
        server_ready_event_(server_ready_event),
        events_(events),
        received_msg_(false),
        received_noarg_reply_(false),
        done_issued_(false) {}

  virtual void Run() OVERRIDE {
    server_ready_event_->Wait();
  }

  void OnDoClient2Task() {
    events_[3]->Wait();
    events_[1]->Signal();
    events_[2]->Signal();
    DCHECK(received_msg_ == false);

    Message* message = new SyncChannelTestMsg_NoArgs;
    message->set_unblock(true);
    Send(message);
    received_noarg_reply_ = true;
  }

  base::Thread* ListenerThread() { return Worker::ListenerThread(); }
 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(RestrictedDispatchDeadlockClient2, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_NoArgs, OnNoArgs)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnNoArgs() {
    received_msg_ = true;
    PossiblyDone();
  }

  void PossiblyDone() {
    if (received_noarg_reply_ && received_msg_) {
      DCHECK(done_issued_ == false);
      done_issued_ = true;
      Send(new SyncChannelTestMsg_Done);
      Done();
    }
  }

  WaitableEvent* server_ready_event_;
  WaitableEvent** events_;
  bool received_msg_;
  bool received_noarg_reply_;
  bool done_issued_;
};

class RestrictedDispatchDeadlockClient1 : public Worker {
 public:
  RestrictedDispatchDeadlockClient1(RestrictedDispatchDeadlockServer* server,
                                    RestrictedDispatchDeadlockClient2* peer,
                                    WaitableEvent* server_ready_event,
                                    WaitableEvent** events)
      : Worker("channel1", Channel::MODE_CLIENT),
        server_(server),
        peer_(peer),
        server_ready_event_(server_ready_event),
        events_(events),
        received_msg_(false),
        received_noarg_reply_(false),
        done_issued_(false) {}

  virtual void Run() OVERRIDE {
    server_ready_event_->Wait();
    server_->ListenerThread()->message_loop()->PostTask(
        FROM_HERE,
        base::Bind(&RestrictedDispatchDeadlockServer::OnDoServerTask, server_));
    peer_->ListenerThread()->message_loop()->PostTask(
        FROM_HERE,
        base::Bind(&RestrictedDispatchDeadlockClient2::OnDoClient2Task, peer_));
    events_[0]->Wait();
    events_[1]->Wait();
    DCHECK(received_msg_ == false);

    Message* message = new SyncChannelTestMsg_NoArgs;
    message->set_unblock(true);
    Send(message);
    received_noarg_reply_ = true;
    PossiblyDone();
  }

  base::Thread* ListenerThread() { return Worker::ListenerThread(); }
 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(RestrictedDispatchDeadlockClient1, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_NoArgs, OnNoArgs)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnNoArgs() {
    received_msg_ = true;
    PossiblyDone();
  }

  void PossiblyDone() {
    if (received_noarg_reply_ && received_msg_) {
      DCHECK(done_issued_ == false);
      done_issued_ = true;
      Send(new SyncChannelTestMsg_Done);
      Done();
    }
  }

  RestrictedDispatchDeadlockServer* server_;
  RestrictedDispatchDeadlockClient2* peer_;
  WaitableEvent* server_ready_event_;
  WaitableEvent** events_;
  bool received_msg_;
  bool received_noarg_reply_;
  bool done_issued_;
};

TEST_F(IPCSyncChannelTest, RestrictedDispatchDeadlock) {
  std::vector<Worker*> workers;

  // A shared worker thread so that server1 and server2 run on one thread.
  base::Thread worker_thread("RestrictedDispatchDeadlock");
  ASSERT_TRUE(worker_thread.Start());

  WaitableEvent server1_ready(false, false);
  WaitableEvent server2_ready(false, false);

  WaitableEvent event0(false, false);
  WaitableEvent event1(false, false);
  WaitableEvent event2(false, false);
  WaitableEvent event3(false, false);
  WaitableEvent* events[4] = {&event0, &event1, &event2, &event3};

  RestrictedDispatchDeadlockServer* server1;
  RestrictedDispatchDeadlockServer* server2;
  RestrictedDispatchDeadlockClient1* client1;
  RestrictedDispatchDeadlockClient2* client2;

  server2 = new RestrictedDispatchDeadlockServer(2, &server2_ready, events,
                                                 NULL);
  server2->OverrideThread(&worker_thread);
  workers.push_back(server2);

  client2 = new RestrictedDispatchDeadlockClient2(server2, &server2_ready,
                                                  events);
  workers.push_back(client2);

  server1 = new RestrictedDispatchDeadlockServer(1, &server1_ready, events,
                                                 server2);
  server1->OverrideThread(&worker_thread);
  workers.push_back(server1);

  client1 = new RestrictedDispatchDeadlockClient1(server1, client2,
                                                  &server1_ready, events);
  workers.push_back(client1);

  RunTest(workers);
}

//------------------------------------------------------------------------------

// This test case inspired by crbug.com/120530
// We create 4 workers that pipe to each other W1->W2->W3->W4->W1, then we send
// a message that recurses through 3, 4 or 5 steps to make sure, say, W1 can
// re-enter when called from W4 while it's sending a message to W2.
// The first worker drives the whole test so it must be treated specially.

class RestrictedDispatchPipeWorker : public Worker {
 public:
  RestrictedDispatchPipeWorker(
      const std::string &channel1,
      WaitableEvent* event1,
      const std::string &channel2,
      WaitableEvent* event2,
      int group,
      int* success)
      : Worker(channel1, Channel::MODE_SERVER),
        event1_(event1),
        event2_(event2),
        other_channel_name_(channel2),
        group_(group),
        success_(success) {
  }

  void OnPingTTL(int ping, int* ret) {
    *ret = 0;
    if (!ping)
      return;
    other_channel_->Send(new SyncChannelTestMsg_PingTTL(ping - 1, ret));
    ++*ret;
  }

  void OnDone() {
    if (is_first())
      return;
    other_channel_->Send(new SyncChannelTestMsg_Done);
    other_channel_.reset();
    Done();
  }

  virtual void Run() OVERRIDE {
    channel()->SetRestrictDispatchChannelGroup(group_);
    if (is_first())
      event1_->Signal();
    event2_->Wait();
    other_channel_.reset(
        new SyncChannel(other_channel_name_,
                        Channel::MODE_CLIENT,
                        this,
                        ipc_thread().message_loop_proxy().get(),
                        true,
                        shutdown_event()));
    other_channel_->SetRestrictDispatchChannelGroup(group_);
    if (!is_first()) {
      event1_->Signal();
      return;
    }
    *success_ = 0;
    int value = 0;
    OnPingTTL(3, &value);
    *success_ += (value == 3);
    OnPingTTL(4, &value);
    *success_ += (value == 4);
    OnPingTTL(5, &value);
    *success_ += (value == 5);
    other_channel_->Send(new SyncChannelTestMsg_Done);
    other_channel_.reset();
    Done();
  }

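  // Only the first worker in the ring is handed a |success| counter; it is the
  // one that drives the test from Run().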
  bool is_first() { return !!success_; }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(RestrictedDispatchPipeWorker, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_PingTTL, OnPingTTL)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Done, OnDone)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  scoped_ptr<SyncChannel> other_channel_;
  WaitableEvent* event1_;
  WaitableEvent* event2_;
  std::string other_channel_name_;
  int group_;
  int* success_;
};

TEST_F(IPCSyncChannelTest, RestrictedDispatch4WayDeadlock) {
  int success = 0;
  std::vector<Worker*> workers;
  WaitableEvent event0(true, false);
  WaitableEvent event1(true, false);
  WaitableEvent event2(true, false);
  WaitableEvent event3(true, false);
  workers.push_back(new RestrictedDispatchPipeWorker(
        "channel0", &event0, "channel1", &event1, 1, &success));
  workers.push_back(new RestrictedDispatchPipeWorker(
        "channel1", &event1, "channel2", &event2, 2, NULL));
  workers.push_back(new RestrictedDispatchPipeWorker(
        "channel2", &event2, "channel3", &event3, 3, NULL));
  workers.push_back(new RestrictedDispatchPipeWorker(
        "channel3", &event3, "channel0", &event0, 4, NULL));
  RunTest(workers);
  EXPECT_EQ(3, success);
}

//------------------------------------------------------------------------------

// This test case inspired by crbug.com/122443
// We want to make sure a reply message with the unblock flag set correctly
// behaves as a reply, not a regular message.
// We have 3 workers. Server1 will send a message to Server2 (which will block),
// during which it will dispatch a message coming from Client, at which point
// it will send another message to Server2. While sending that second message it
// will receive the reply from Server2 with the unblock flag set.

class ReentrantReplyServer1 : public Worker {
 public:
  ReentrantReplyServer1(WaitableEvent* server_ready)
      : Worker("reentrant_reply1", Channel::MODE_SERVER),
        server_ready_(server_ready) { }

  virtual void Run() OVERRIDE {
    server2_channel_.reset(
        new SyncChannel("reentrant_reply2",
                        Channel::MODE_CLIENT,
                        this,
                        ipc_thread().message_loop_proxy().get(),
                        true,
                        shutdown_event()));
    server_ready_->Signal();
    Message* msg = new SyncChannelTestMsg_Reentrant1();
    server2_channel_->Send(msg);
    server2_channel_.reset();
    Done();
  }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(ReentrantReplyServer1, message)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Reentrant2, OnReentrant2)
     IPC_REPLY_HANDLER(OnReply)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnReentrant2() {
    Message* msg = new SyncChannelTestMsg_Reentrant3();
    server2_channel_->Send(msg);
  }

  void OnReply(const Message& message) {
    // If we get here, the Send() will never receive the reply (thus would
    // hang), so abort instead.
    LOG(FATAL) << "Reply message was dispatched";
  }

  WaitableEvent* server_ready_;
  scoped_ptr<SyncChannel> server2_channel_;
};

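// Holds the delayed reply to Reentrant1 until Reentrant3 arrives, then sends
// that reply back with the unblock flag set.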
class ReentrantReplyServer2 : public Worker {
 public:
  ReentrantReplyServer2()
      : Worker("reentrant_reply2", Channel::MODE_SERVER),
        reply_(NULL) { }

 private:
  virtual bool OnMessageReceived(const Message& message) OVERRIDE {
    IPC_BEGIN_MESSAGE_MAP(ReentrantReplyServer2, message)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(
         SyncChannelTestMsg_Reentrant1, OnReentrant1)
     IPC_MESSAGE_HANDLER(SyncChannelTestMsg_Reentrant3, OnReentrant3)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  void OnReentrant1(Message* reply) {
    DCHECK(!reply_);
    reply_ = reply;
  }

  void OnReentrant3() {
    DCHECK(reply_);
    Message* reply = reply_;
    reply_ = NULL;
    reply->set_unblock(true);
    Send(reply);
    Done();
  }

  Message* reply_;
};

class ReentrantReplyClient : public Worker {
 public:
  ReentrantReplyClient(WaitableEvent* server_ready)
      : Worker("reentrant_reply1", Channel::MODE_CLIENT),
        server_ready_(server_ready) { }

  virtual void Run() OVERRIDE {
    server_ready_->Wait();
    Send(new SyncChannelTestMsg_Reentrant2());
    Done();
  }

 private:
  WaitableEvent* server_ready_;
};

TEST_F(IPCSyncChannelTest, ReentrantReply) {
  std::vector<Worker*> workers;
  WaitableEvent server_ready(false, false);
  workers.push_back(new ReentrantReplyServer2());
  workers.push_back(new ReentrantReplyServer1(&server_ready));
  workers.push_back(new ReentrantReplyClient(&server_ready));
  RunTest(workers);
}

//------------------------------------------------------------------------------

// Generate a validated channel ID using Channel::GenerateVerifiedChannelID().

class VerifiedServer : public Worker {
 public:
  VerifiedServer(base::Thread* listener_thread,
                 const std::string& channel_name,
                 const std::string& reply_text)
      : Worker(channel_name, Channel::MODE_SERVER),
        reply_text_(reply_text) {
    Worker::OverrideThread(listener_thread);
  }

  virtual void OnNestedTestMsg(Message* reply_msg) OVERRIDE {
    VLOG(1) << __FUNCTION__ << " Sending reply: " << reply_text_;
    SyncChannelNestedTestMsg_String::WriteReplyParams(reply_msg, reply_text_);
    Send(reply_msg);
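    // Both ends of the verified channel live in this test process, so the
    // authenticated peer pid should be our own pid.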
    ASSERT_EQ(channel()->peer_pid(), base::GetCurrentProcId());
    Done();
  }

 private:
  std::string reply_text_;
};

class VerifiedClient : public Worker {
 public:
  VerifiedClient(base::Thread* listener_thread,
                 const std::string& channel_name,
                 const std::string& expected_text)
      : Worker(channel_name, Channel::MODE_CLIENT),
        expected_text_(expected_text) {
    Worker::OverrideThread(listener_thread);
  }

  virtual void Run() OVERRIDE {
    std::string response;
    SyncMessage* msg = new SyncChannelNestedTestMsg_String(&response);
    bool result = Send(msg);
    DCHECK(result);
    DCHECK_EQ(response, expected_text_);
    // expected_text_ is only used in the above DCHECK. This line suppresses the
    // "unused private field" warning in release builds.
    (void)expected_text_;

    VLOG(1) << __FUNCTION__ << " Received reply: " << response;
    ASSERT_EQ(channel()->peer_pid(), base::GetCurrentProcId());
    Done();
  }

 private:
  std::string expected_text_;
};

void Verified() {
  std::vector<Worker*> workers;

  // A shared worker thread for servers
  base::Thread server_worker_thread("Verified_ServerListener");
  ASSERT_TRUE(server_worker_thread.Start());

  base::Thread client_worker_thread("Verified_ClientListener");
  ASSERT_TRUE(client_worker_thread.Start());

  std::string channel_id = Channel::GenerateVerifiedChannelID("Verified");
  Worker* worker;

  worker = new VerifiedServer(&server_worker_thread,
                              channel_id,
                              "Got first message");
  workers.push_back(worker);

  worker = new VerifiedClient(&client_worker_thread,
                              channel_id,
                              "Got first message");
  workers.push_back(worker);

  RunTest(workers);
}

// Windows needs to send an out-of-band secret to verify the client end of the
// channel. Test that we still connect correctly in that case.
TEST_F(IPCSyncChannelTest, Verified) {
  Verified();
}

}  // namespace
}  // namespace IPC