// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/socket/client_socket_pool_base.h"

#include "base/compiler_specific.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/stats_counters.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "base/values.h"
#include "net/base/net_errors.h"
#include "net/base/net_log.h"

using base::TimeDelta;

namespace net {

namespace {

// Indicates whether the idle socket cleanup timer should be enabled. When the
// timer is disabled, idle sockets are closed the next time a socket request is
// made.
bool g_cleanup_timer_enabled = true;

// The timeout value, in seconds, used to clean up idle sockets that can't be
// reused.
//
// Note: It's important to close idle sockets that have received data as soon
// as possible because the received data may cause BSOD on Windows XP under
// some conditions.  See http://crbug.com/4606.
const int kCleanupInterval = 10;  // DO NOT INCREASE THIS TIMEOUT.

// Indicates whether or not we should establish a new transport layer
// connection after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;

}  // namespace

ConnectJob::ConnectJob(const std::string& group_name,
                       base::TimeDelta timeout_duration,
                       RequestPriority priority,
                       Delegate* delegate,
                       const BoundNetLog& net_log)
    : group_name_(group_name),
      timeout_duration_(timeout_duration),
      priority_(priority),
      delegate_(delegate),
      net_log_(net_log),
      idle_(true) {
  DCHECK(!group_name.empty());
  DCHECK(delegate);
  net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB,
                     NetLog::StringCallback("group_name", &group_name_));
}

ConnectJob::~ConnectJob() {
  net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
}

scoped_ptr<StreamSocket> ConnectJob::PassSocket() {
  return socket_.Pass();
}

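// Starts the timeout timer (if a timeout was given), logs the start of the
// connect, and runs the subclass's ConnectInternal().  On synchronous
// completion the delegate is cleared, since it will never be notified for
// this job.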
int ConnectJob::Connect() {
  if (timeout_duration_ != base::TimeDelta())
    timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout);

  idle_ = false;

  LogConnectStart();

  int rv = ConnectInternal();

  if (rv != ERR_IO_PENDING) {
    LogConnectCompletion(rv);
    delegate_ = NULL;
  }

  return rv;
}

void ConnectJob::SetSocket(scoped_ptr<StreamSocket> socket) {
  if (socket) {
    net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET,
                       socket->NetLog().source().ToEventParametersCallback());
  }
  socket_ = socket.Pass();
}

void ConnectJob::NotifyDelegateOfCompletion(int rv) {
  // The delegate will own |this|.
  Delegate* delegate = delegate_;
  delegate_ = NULL;

  LogConnectCompletion(rv);
  delegate->OnConnectJobComplete(rv, this);
}

void ConnectJob::ResetTimer(base::TimeDelta remaining_time) {
  timer_.Stop();
  timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout);
}

void ConnectJob::LogConnectStart() {
  connect_timing_.connect_start = base::TimeTicks::Now();
  net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT);
}

void ConnectJob::LogConnectCompletion(int net_error) {
  connect_timing_.connect_end = base::TimeTicks::Now();
  net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error);
}

void ConnectJob::OnTimeout() {
  // Make sure the socket is NULL before calling into |delegate|.
  SetSocket(scoped_ptr<StreamSocket>());

  net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT);

  NotifyDelegateOfCompletion(ERR_TIMED_OUT);
}

namespace internal {

ClientSocketPoolBaseHelper::Request::Request(
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    RequestPriority priority,
    bool ignore_limits,
    Flags flags,
    const BoundNetLog& net_log)
    : handle_(handle),
      callback_(callback),
      priority_(priority),
      ignore_limits_(ignore_limits),
      flags_(flags),
      net_log_(net_log) {
  if (ignore_limits_)
    DCHECK_EQ(priority_, MAXIMUM_PRIORITY);
}

ClientSocketPoolBaseHelper::Request::~Request() {}

ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
    HigherLayeredPool* pool,
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    ConnectJobFactory* connect_job_factory)
    : idle_socket_count_(0),
      connecting_socket_count_(0),
      handed_out_socket_count_(0),
      max_sockets_(max_sockets),
      max_sockets_per_group_(max_sockets_per_group),
      use_cleanup_timer_(g_cleanup_timer_enabled),
      unused_idle_socket_timeout_(unused_idle_socket_timeout),
      used_idle_socket_timeout_(used_idle_socket_timeout),
      connect_job_factory_(connect_job_factory),
      connect_backup_jobs_enabled_(false),
      pool_generation_number_(0),
      pool_(pool),
      weak_factory_(this) {
  DCHECK_LE(0, max_sockets_per_group);
  DCHECK_LE(max_sockets_per_group, max_sockets);

  NetworkChangeNotifier::AddIPAddressObserver(this);
}

ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
  // Clean up any idle sockets and pending connect jobs.  Assert that we have no
  // remaining active sockets or pending requests.  They should have all been
  // cleaned up prior to |this| being destroyed.
  FlushWithError(ERR_ABORTED);
  DCHECK(group_map_.empty());
  DCHECK(pending_callback_map_.empty());
  DCHECK_EQ(0, connecting_socket_count_);
  CHECK(higher_pools_.empty());

  NetworkChangeNotifier::RemoveIPAddressObserver(this);

  // Remove from lower layer pools.
  for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    (*it)->RemoveHigherLayeredPool(pool_);
  }
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair()
    : result(OK) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(
    const CompletionCallback& callback_in, int result_in)
    : callback(callback_in),
      result(result_in) {
}

ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}

bool ClientSocketPoolBaseHelper::IsStalled() const {
  // If a lower layer pool is stalled, consider |this| stalled as well.
  for (std::set<LowerLayeredPool*>::const_iterator it = lower_pools_.begin();
       it != lower_pools_.end();
       ++it) {
    if ((*it)->IsStalled())
      return true;
  }

  // If fewer than |max_sockets_| are in use, then clearly |this| is not
  // stalled.
  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
    return false;
  // So in order to be stalled, |this| must be using at least |max_sockets_| AND
  // |this| must have a request that is actually stalled on the global socket
  // limit.  To find such a request, look for a group that has more requests
  // than jobs AND where the number of sockets is less than
  // |max_sockets_per_group_|.  (If the number of sockets is equal to
  // |max_sockets_per_group_|, then the request is stalled on the group limit,
  // which does not count.)
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); ++it) {
    if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
      return true;
  }
  return false;
}

void ClientSocketPoolBaseHelper::AddLowerLayeredPool(
    LowerLayeredPool* lower_pool) {
  DCHECK(pool_);
  CHECK(!ContainsKey(lower_pools_, lower_pool));
  lower_pools_.insert(lower_pool);
  lower_pool->AddHigherLayeredPool(pool_);
}

void ClientSocketPoolBaseHelper::AddHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(!ContainsKey(higher_pools_, higher_pool));
  higher_pools_.insert(higher_pool);
}

void ClientSocketPoolBaseHelper::RemoveHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(ContainsKey(higher_pools_, higher_pool));
  higher_pools_.erase(higher_pool);
}

int ClientSocketPoolBaseHelper::RequestSocket(
    const std::string& group_name,
    scoped_ptr<const Request> request) {
  CHECK(!request->callback().is_null());
  CHECK(request->handle());

  // Clean up any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL);
  Group* group = GetOrCreateGroup(group_name);

  int rv = RequestSocketInternal(group_name, *request);
  if (rv != ERR_IO_PENDING) {
    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    CHECK(!request->handle()->is_initialized());
    request.reset();
  } else {
    group->InsertPendingRequest(request.Pass());
    // Have to do this asynchronously, as closing sockets in higher level pools
    // calls back into |this|, which will cause all sorts of fun and exciting
    // re-entrancy issues if the socket pool is doing something else at the
    // time.
    if (group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
      base::MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(
              &ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools,
              weak_factory_.GetWeakPtr()));
    }
  }
  return rv;
}

void ClientSocketPoolBaseHelper::RequestSockets(
    const std::string& group_name,
    const Request& request,
    int num_sockets) {
  DCHECK(request.callback().is_null());
  DCHECK(!request.handle());

  // Clean up any timed-out idle sockets if no timer is used.
  if (!use_cleanup_timer_)
    CleanupIdleSockets(false);

  if (num_sockets > max_sockets_per_group_) {
    num_sockets = max_sockets_per_group_;
  }

  request.net_log().BeginEvent(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS,
      NetLog::IntegerCallback("num_sockets", num_sockets));

  Group* group = GetOrCreateGroup(group_name);

  // RequestSocketInternal() may delete the group.
  bool deleted_group = false;

  int rv = OK;
  for (int num_iterations_left = num_sockets;
       group->NumActiveSocketSlots() < num_sockets &&
       num_iterations_left > 0; num_iterations_left--) {
    rv = RequestSocketInternal(group_name, request);
    if (rv < 0 && rv != ERR_IO_PENDING) {
      // We encountered a synchronous error.  Give up.
      if (!ContainsKey(group_map_, group_name))
        deleted_group = true;
      break;
    }
    if (!ContainsKey(group_map_, group_name)) {
      // Unexpected.  The group should only be getting deleted on synchronous
      // error.
      NOTREACHED();
      deleted_group = true;
      break;
    }
  }

  if (!deleted_group && group->IsEmpty())
    RemoveGroup(group_name);

  if (rv == ERR_IO_PENDING)
    rv = OK;
  request.net_log().EndEventWithNetErrorCode(
      NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv);
}

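// Core dispatch for a single socket request: reuses an idle socket if allowed,
// piggybacks on an unassigned ConnectJob, queues the request if the group or
// pool limits are hit, or starts a new ConnectJob.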
int ClientSocketPoolBaseHelper::RequestSocketInternal(
    const std::string& group_name,
    const Request& request) {
  ClientSocketHandle* const handle = request.handle();
  const bool preconnecting = !handle;
  Group* group = GetOrCreateGroup(group_name);

  if (!(request.flags() & NO_IDLE_SOCKETS)) {
    // Try to reuse a socket.
    if (AssignIdleSocketToRequest(request, group))
      return OK;
  }

  // If there are more ConnectJobs than pending requests, there's no need to do
  // anything.  We can just wait for the extra job to connect, and then assign
  // it to the request.
  if (!preconnecting && group->TryToUseUnassignedConnectJob())
    return ERR_IO_PENDING;

  // Can we make another active socket now?
  if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
      !request.ignore_limits()) {
    // TODO(willchan): Consider whether or not we need to close a socket in a
    // higher layered group. I don't think this makes sense since we would just
    // reuse that socket then if we needed one and wouldn't make it down to this
    // layer.
    request.net_log().AddEvent(
        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
    return ERR_IO_PENDING;
  }

  if (ReachedMaxSocketsLimit() && !request.ignore_limits()) {
    // NOTE(mmenke):  Wonder if we really need different code for each case
    // here.  Only reason for them now seems to be preconnects.
    if (idle_socket_count() > 0) {
      // There's an idle socket in this pool. Either that's because there's
      // still one in this group, but we got here due to preconnecting bypassing
      // idle sockets, or because there's an idle socket in another group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
      if (preconnecting && !closed)
        return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
    } else {
      // We could check if we really have a stalled group here, but it requires
      // a scan of all groups, so just flip a flag here, and do the check later.
      request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
      return ERR_IO_PENDING;
    }
  }

  // We couldn't find a socket to reuse, and there's space to allocate one,
  // so allocate and connect a new one.
  scoped_ptr<ConnectJob> connect_job(
      connect_job_factory_->NewConnectJob(group_name, request, this));

  int rv = connect_job->Connect();
  if (rv == OK) {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    if (!preconnecting) {
      HandOutSocket(connect_job->PassSocket(), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else {
      AddIdleSocket(connect_job->PassSocket(), group);
    }
  } else if (rv == ERR_IO_PENDING) {
    // If we don't have any sockets in this group, set a timer for potentially
    // creating a new one.  If the SYN is lost, this backup socket may complete
    // before the slow socket, improving end user latency.
    if (connect_backup_jobs_enabled_ && group->IsEmpty()) {
      group->StartBackupJobTimer(group_name, this);
    }

    connecting_socket_count_++;

    group->AddJob(connect_job.Pass(), preconnecting);
  } else {
    LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
    scoped_ptr<StreamSocket> error_socket;
    if (!preconnecting) {
      DCHECK(handle);
      connect_job->GetAdditionalErrorState(handle);
      error_socket = connect_job->PassSocket();
    }
    if (error_socket) {
      HandOutSocket(error_socket.Pass(), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle, base::TimeDelta(),
                    group, request.net_log());
    } else if (group->IsEmpty()) {
      RemoveGroup(group_name);
    }
  }

  return rv;
}

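// Hands out an idle socket from |group| if one is usable: prefers the most
// recently used socket that has seen traffic, otherwise falls back to the
// oldest unused idle socket.  Returns true if a socket was assigned.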
bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest(
    const Request& request, Group* group) {
  std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
  std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end();

  // Iterate through the idle sockets forwards (oldest to newest)
  //   * Delete any disconnected ones.
  //   * If we find a used idle socket, assign it to |idle_socket_it|.  At the
  //     end, |idle_socket_it| will point to the newest used idle socket.
  for (std::list<IdleSocket>::iterator it = idle_sockets->begin();
       it != idle_sockets->end();) {
    if (!it->IsUsable()) {
      DecrementIdleCount();
      delete it->socket;
      it = idle_sockets->erase(it);
      continue;
    }

    if (it->socket->WasEverUsed()) {
      // We found one we can reuse!
      idle_socket_it = it;
    }

    ++it;
  }

  // If we haven't found an idle socket, that means there are no used idle
  // sockets.  Pick the oldest (first) idle socket (FIFO).
  if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
    idle_socket_it = idle_sockets->begin();

  if (idle_socket_it != idle_sockets->end()) {
    DecrementIdleCount();
    base::TimeDelta idle_time =
        base::TimeTicks::Now() - idle_socket_it->start_time;
    IdleSocket idle_socket = *idle_socket_it;
    idle_sockets->erase(idle_socket_it);
    // TODO(davidben): If |idle_time| is under some low watermark, consider
    // treating as UNUSED rather than UNUSED_IDLE. This will avoid
    // HttpNetworkTransaction retrying on some errors.
    ClientSocketHandle::SocketReuseType reuse_type =
        idle_socket.socket->WasEverUsed() ?
            ClientSocketHandle::REUSED_IDLE :
            ClientSocketHandle::UNUSED_IDLE;
    HandOutSocket(
        scoped_ptr<StreamSocket>(idle_socket.socket),
        reuse_type,
        LoadTimingInfo::ConnectTiming(),
        request.handle(),
        idle_time,
        group,
        request.net_log());
    return true;
  }

  return false;
}

// static
void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
    const NetLog::Source& connect_job_source, const Request& request) {
  request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
                             connect_job_source.ToEventParametersCallback());
}

void ClientSocketPoolBaseHelper::CancelRequest(
    const std::string& group_name, ClientSocketHandle* handle) {
  PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle);
  if (callback_it != pending_callback_map_.end()) {
    int result = callback_it->second.result;
    pending_callback_map_.erase(callback_it);
    scoped_ptr<StreamSocket> socket = handle->PassSocket();
    if (socket) {
      if (result != OK)
        socket->Disconnect();
      ReleaseSocket(handle->group_name(), socket.Pass(), handle->id());
    }
    return;
  }

  CHECK(ContainsKey(group_map_, group_name));

  Group* group = GetOrCreateGroup(group_name);

  // Search pending_requests for matching handle.
  scoped_ptr<const Request> request =
      group->FindAndRemovePendingRequest(handle);
  if (request) {
    request->net_log().AddEvent(NetLog::TYPE_CANCELLED);
    request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);

    // We let the job run, unless we're at the socket limit and there is
    // not another request waiting on the job.
    if (group->jobs().size() > group->pending_request_count() &&
        ReachedMaxSocketsLimit()) {
      RemoveConnectJob(*group->jobs().begin(), group);
      CheckForStalledSocketGroups();
    }
  }
}

bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {
  return ContainsKey(group_map_, group_name);
}

void ClientSocketPoolBaseHelper::CloseIdleSockets() {
  CleanupIdleSockets(true);
  DCHECK_EQ(0, idle_socket_count_);
}

int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
    const std::string& group_name) const {
  GroupMap::const_iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  return i->second->idle_sockets().size();
}

LoadState ClientSocketPoolBaseHelper::GetLoadState(
    const std::string& group_name,
    const ClientSocketHandle* handle) const {
  if (ContainsKey(pending_callback_map_, handle))
    return LOAD_STATE_CONNECTING;

  if (!ContainsKey(group_map_, group_name)) {
    NOTREACHED() << "ClientSocketPool does not contain group: " << group_name
                 << " for handle: " << handle;
    return LOAD_STATE_IDLE;
  }

  // Can't use operator[] since it is non-const.
  const Group& group = *group_map_.find(group_name)->second;

  if (group.HasConnectJobForHandle(handle)) {
    // Just return the state of the farthest-along ConnectJob for the first
    // group.jobs().size() pending requests.
    LoadState max_state = LOAD_STATE_IDLE;
    for (ConnectJobSet::const_iterator job_it = group.jobs().begin();
         job_it != group.jobs().end(); ++job_it) {
      max_state = std::max(max_state, (*job_it)->GetLoadState());
    }
    return max_state;
  }

  if (group.IsStalledOnPoolMaxSockets(max_sockets_per_group_))
    return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL;
  return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET;
}

base::DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
    const std::string& name, const std::string& type) const {
  base::DictionaryValue* dict = new base::DictionaryValue();
  dict->SetString("name", name);
  dict->SetString("type", type);
  dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
  dict->SetInteger("connecting_socket_count", connecting_socket_count_);
  dict->SetInteger("idle_socket_count", idle_socket_count_);
  dict->SetInteger("max_socket_count", max_sockets_);
  dict->SetInteger("max_sockets_per_group", max_sockets_per_group_);
  dict->SetInteger("pool_generation_number", pool_generation_number_);

  if (group_map_.empty())
    return dict;

  base::DictionaryValue* all_groups_dict = new base::DictionaryValue();
  for (GroupMap::const_iterator it = group_map_.begin();
       it != group_map_.end(); it++) {
    const Group* group = it->second;
    base::DictionaryValue* group_dict = new base::DictionaryValue();

    group_dict->SetInteger("pending_request_count",
                           group->pending_request_count());
    if (group->has_pending_requests()) {
      group_dict->SetString(
          "top_pending_priority",
          RequestPriorityToString(group->TopPendingPriority()));
    }

    group_dict->SetInteger("active_socket_count", group->active_socket_count());

    base::ListValue* idle_socket_list = new base::ListValue();
    std::list<IdleSocket>::const_iterator idle_socket;
    for (idle_socket = group->idle_sockets().begin();
         idle_socket != group->idle_sockets().end();
         idle_socket++) {
      int source_id = idle_socket->socket->NetLog().source().id;
      idle_socket_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("idle_sockets", idle_socket_list);

    base::ListValue* connect_jobs_list = new base::ListValue();
    std::set<ConnectJob*>::const_iterator job = group->jobs().begin();
    for (job = group->jobs().begin(); job != group->jobs().end(); job++) {
      int source_id = (*job)->net_log().source().id;
      connect_jobs_list->Append(new base::FundamentalValue(source_id));
    }
    group_dict->Set("connect_jobs", connect_jobs_list);

    group_dict->SetBoolean("is_stalled",
                           group->IsStalledOnPoolMaxSockets(
                               max_sockets_per_group_));
    group_dict->SetBoolean("backup_job_timer_is_running",
                           group->BackupJobTimerIsRunning());

    all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
  }
  dict->Set("groups", all_groups_dict);
  return dict;
}

bool ClientSocketPoolBaseHelper::IdleSocket::IsUsable() const {
  if (socket->WasEverUsed())
    return socket->IsConnectedAndIdle();
  return socket->IsConnected();
}

bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup(
    base::TimeTicks now,
    base::TimeDelta timeout) const {
  bool timed_out = (now - start_time) >= timeout;
  if (timed_out)
    return true;
  return !IsUsable();
}

void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) {
  if (idle_socket_count_ == 0)
    return;

  // Retrieve the current time once at the start of the function rather than
  // inside the inner loop, since it shouldn't change by a meaningful amount.
  base::TimeTicks now = base::TimeTicks::Now();

  GroupMap::iterator i = group_map_.begin();
  while (i != group_map_.end()) {
    Group* group = i->second;

    std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin();
    while (j != group->idle_sockets().end()) {
      base::TimeDelta timeout =
          j->socket->WasEverUsed() ?
          used_idle_socket_timeout_ : unused_idle_socket_timeout_;
      if (force || j->ShouldCleanup(now, timeout)) {
        delete j->socket;
        j = group->mutable_idle_sockets()->erase(j);
        DecrementIdleCount();
      } else {
        ++j;
      }
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}

ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup(
    const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  if (it != group_map_.end())
    return it->second;
  Group* group = new Group;
  group_map_[group_name] = group;
  return group;
}

void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) {
  GroupMap::iterator it = group_map_.find(group_name);
  CHECK(it != group_map_.end());

  RemoveGroup(it);
}

void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) {
  delete it->second;
  group_map_.erase(it);
}

// static
bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() {
  return g_connect_backup_jobs_enabled;
}

// static
bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) {
  bool old_value = g_connect_backup_jobs_enabled;
  g_connect_backup_jobs_enabled = enabled;
  return old_value;
}

void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
  connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled;
}

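// Bumps the idle socket count; when the first idle socket appears and the
// cleanup timer is enabled, starts the periodic cleanup timer.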
void ClientSocketPoolBaseHelper::IncrementIdleCount() {
  if (++idle_socket_count_ == 1 && use_cleanup_timer_)
    StartIdleSocketTimer();
}

void ClientSocketPoolBaseHelper::DecrementIdleCount() {
  if (--idle_socket_count_ == 0)
    timer_.Stop();
}

// static
bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
  return g_cleanup_timer_enabled;
}

// static
bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
  bool old_value = g_cleanup_timer_enabled;
  g_cleanup_timer_enabled = enabled;
  return old_value;
}

void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
  timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this,
               &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
}

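// Returns a handed-out socket to the pool.  A still-usable socket from the
// current pool generation goes back on the group's idle list; anything else
// is destroyed.  Either way, the freed slot may unblock a stalled group.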
void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
                                               scoped_ptr<StreamSocket> socket,
                                               int id) {
  GroupMap::iterator i = group_map_.find(group_name);
  CHECK(i != group_map_.end());

  Group* group = i->second;

  CHECK_GT(handed_out_socket_count_, 0);
  handed_out_socket_count_--;

  CHECK_GT(group->active_socket_count(), 0);
  group->DecrementActiveSocketCount();

  const bool can_reuse = socket->IsConnectedAndIdle() &&
      id == pool_generation_number_;
  if (can_reuse) {
    // Add it to the idle list.
    AddIdleSocket(socket.Pass(), group);
    OnAvailableSocketSlot(group_name, group);
  } else {
    socket.reset();
  }

  CheckForStalledSocketGroups();
}

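// If a group in this pool is stalled on the global socket limit, frees
// capacity (closing an idle socket if we're at the limit) and wakes that
// group's highest-priority pending request.  Otherwise, if a lower layer pool
// is stalled, closes one idle socket here so it can make progress.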
void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
  // If we have idle sockets, see if we can give one to the top-stalled group.
  std::string top_group_name;
  Group* top_group = NULL;
  if (!FindTopStalledGroup(&top_group, &top_group_name)) {
    // There may still be a stalled group in a lower level pool.
    for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
         it != lower_pools_.end();
         ++it) {
      if ((*it)->IsStalled()) {
        CloseOneIdleSocket();
        break;
      }
    }
    return;
  }

  if (ReachedMaxSocketsLimit()) {
    if (idle_socket_count() > 0) {
      CloseOneIdleSocket();
    } else {
      // We can't activate more sockets since we're already at our global
      // limit.
      return;
    }
  }

  // Note: we don't loop on waking stalled groups.  If the stalled group is at
  // its limit, we may be left with other stalled groups that could be woken.
  // This isn't optimal, but there is no starvation, so to avoid the looping we
  // leave it at this.
  OnAvailableSocketSlot(top_group_name, top_group);
}

// Search for the highest priority pending request, amongst the groups that
// are not at the |max_sockets_per_group_| limit. Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
    Group** group,
    std::string* group_name) const {
  CHECK((group && group_name) || (!group && !group_name));
  Group* top_group = NULL;
  const std::string* top_group_name = NULL;
  bool has_stalled_group = false;
  for (GroupMap::const_iterator i = group_map_.begin();
       i != group_map_.end(); ++i) {
    Group* curr_group = i->second;
    if (!curr_group->has_pending_requests())
      continue;
    if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
      if (!group)
        return true;
      has_stalled_group = true;
      bool has_higher_priority = !top_group ||
          curr_group->TopPendingPriority() > top_group->TopPendingPriority();
      if (has_higher_priority) {
        top_group = curr_group;
        top_group_name = &i->first;
      }
    }
  }

  if (top_group) {
    CHECK(group);
    *group = top_group;
    *group_name = *top_group_name;
  } else {
    CHECK(!has_stalled_group);
  }
  return has_stalled_group;
}

void ClientSocketPoolBaseHelper::OnConnectJobComplete(
    int result, ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  const std::string group_name = job->group_name();
  GroupMap::iterator group_it = group_map_.find(group_name);
  CHECK(group_it != group_map_.end());
  Group* group = group_it->second;

  scoped_ptr<StreamSocket> socket = job->PassSocket();

  // Copies of these are needed because |job| may be deleted before they are
  // accessed.
  BoundNetLog job_log = job->net_log();
  LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing();

  // RemoveConnectJob(job, _) must be called by all branches below;
  // otherwise, |job| will be leaked.

  if (result == OK) {
    DCHECK(socket.get());
    RemoveConnectJob(job, group);
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      HandOutSocket(
          socket.Pass(), ClientSocketHandle::UNUSED, connect_timing,
          request->handle(), base::TimeDelta(), group, request->net_log());
      request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      AddIdleSocket(socket.Pass(), group);
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  } else {
    // If we got a socket, it must contain error information so pass that
    // up so that the caller can retrieve it.
    bool handed_out_socket = false;
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    if (request) {
      LogBoundConnectJobToRequest(job_log.source(), *request);
      job->GetAdditionalErrorState(request->handle());
      RemoveConnectJob(job, group);
      if (socket.get()) {
        handed_out_socket = true;
        HandOutSocket(socket.Pass(), ClientSocketHandle::UNUSED,
                      connect_timing, request->handle(), base::TimeDelta(),
                      group, request->net_log());
      }
      request->net_log().EndEventWithNetErrorCode(
          NetLog::TYPE_SOCKET_POOL, result);
      InvokeUserCallbackLater(request->handle(), request->callback(), result);
    } else {
      RemoveConnectJob(job, group);
    }
    if (!handed_out_socket) {
      OnAvailableSocketSlot(group_name, group);
      CheckForStalledSocketGroups();
    }
  }
}

void ClientSocketPoolBaseHelper::OnIPAddressChanged() {
  FlushWithError(ERR_NETWORK_CHANGED);
}

void ClientSocketPoolBaseHelper::FlushWithError(int error) {
  pool_generation_number_++;
  CancelAllConnectJobs();
  CloseIdleSockets();
  CancelAllRequestsWithError(error);
}

void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                  Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  group->RemoveJob(job);
}

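// Called when a socket slot in |group| frees up: removes the group if it is
// now empty, otherwise starts servicing its next pending request.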
void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name, Group* group) {
  DCHECK(ContainsKey(group_map_, group_name));
  if (group->IsEmpty()) {
    RemoveGroup(group_name);
  } else if (group->has_pending_requests()) {
    ProcessPendingRequest(group_name, group);
  }
}

void ClientSocketPoolBaseHelper::ProcessPendingRequest(
    const std::string& group_name, Group* group) {
  const Request* next_request = group->GetNextPendingRequest();
  DCHECK(next_request);
  int rv = RequestSocketInternal(group_name, *next_request);
  if (rv != ERR_IO_PENDING) {
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    DCHECK(request);
    if (group->IsEmpty())
      RemoveGroup(group_name);

    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
  }
}

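// Transfers ownership of |socket| to |handle|, records the reuse type, idle
// time, pool generation, and connect timing on the handle, logs the binding,
// and updates the active socket counts.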
void ClientSocketPoolBaseHelper::HandOutSocket(
    scoped_ptr<StreamSocket> socket,
    ClientSocketHandle::SocketReuseType reuse_type,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->SetSocket(socket.Pass());
  handle->set_reuse_type(reuse_type);
  handle->set_idle_time(idle_time);
  handle->set_pool_id(pool_generation_number_);
  handle->set_connect_timing(connect_timing);

  if (handle->is_reused()) {
    net_log.AddEvent(
        NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET,
        NetLog::IntegerCallback(
            "idle_ms", static_cast<int>(idle_time.InMilliseconds())));
  }

  net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source().ToEventParametersCallback());

  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}

void ClientSocketPoolBaseHelper::AddIdleSocket(
    scoped_ptr<StreamSocket> socket,
    Group* group) {
  DCHECK(socket);
  IdleSocket idle_socket;
  idle_socket.socket = socket.release();
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(idle_socket);
  IncrementIdleCount();
}

void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllJobs();

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
  DCHECK_EQ(0, connecting_socket_count_);
}

void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;

    while (true) {
      scoped_ptr<const Request> request = group->PopNextPendingRequest();
      if (!request)
        break;
      InvokeUserCallbackLater(request->handle(), request->callback(), error);
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}

bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
  // Each connecting socket will eventually connect and be handed out.
  int total = handed_out_socket_count_ + connecting_socket_count_ +
      idle_socket_count();
  // There can be more sockets than the limit since some requests can ignore
  // the limit.
  if (total < max_sockets_)
    return false;
  return true;
}

bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
  if (idle_socket_count() == 0)
    return false;
  return CloseOneIdleSocketExceptInGroup(NULL);
}

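// Closes the oldest idle socket in the first group found that isn't
// |exception_group|.  Returns true if a socket was closed.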
bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
    const Group* exception_group) {
  CHECK_GT(idle_socket_count(), 0);

  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group* group = i->second;
    if (exception_group == group)
      continue;
    std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();

    if (!idle_sockets->empty()) {
      delete idle_sockets->front().socket;
      idle_sockets->pop_front();
      DecrementIdleCount();
      if (group->IsEmpty())
        RemoveGroup(i);

      return true;
    }
  }

  return false;
}

bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInHigherLayeredPool() {
  // This pool doesn't have any idle sockets. It's possible that a pool at a
  // higher layer is holding one of this pool's sockets active, but that the
  // socket is actually idle. Query the higher layers.
  for (std::set<HigherLayeredPool*>::const_iterator it = higher_pools_.begin();
       it != higher_pools_.end(); ++it) {
    if ((*it)->CloseOneIdleConnection())
      return true;
  }
  return false;
}

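// Stores the result and posts a task to run |callback| later; running user
// callbacks asynchronously keeps them from re-entering the pool while it is
// in the middle of other work.  CancelRequest() erases the entry from
// |pending_callback_map_|, so a cancelled request's callback is never run.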
void ClientSocketPoolBaseHelper::InvokeUserCallbackLater(
    ClientSocketHandle* handle, const CompletionCallback& callback, int rv) {
  CHECK(!ContainsKey(pending_callback_map_, handle));
  pending_callback_map_[handle] = CallbackResultPair(callback, rv);
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback,
                 weak_factory_.GetWeakPtr(), handle));
}

void ClientSocketPoolBaseHelper::InvokeUserCallback(
    ClientSocketHandle* handle) {
  PendingCallbackMap::iterator it = pending_callback_map_.find(handle);

  // Exit if the request has already been cancelled.
  if (it == pending_callback_map_.end())
    return;

  CHECK(!handle->is_initialized());
  CompletionCallback callback = it->second.callback;
  int result = it->second.result;
  pending_callback_map_.erase(it);
  callback.Run(result);
}

void ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools() {
  while (IsStalled()) {
    // Closing a socket will result in calling back into |this| to use the freed
    // socket slot, so nothing else is needed.
    if (!CloseOneIdleConnectionInHigherLayeredPool())
      return;
  }
}

ClientSocketPoolBaseHelper::Group::Group()
    : unassigned_job_count_(0),
      pending_requests_(NUM_PRIORITIES),
      active_socket_count_(0) {}

ClientSocketPoolBaseHelper::Group::~Group() {
  DCHECK_EQ(0u, unassigned_job_count_);
}

void ClientSocketPoolBaseHelper::Group::StartBackupJobTimer(
    const std::string& group_name,
    ClientSocketPoolBaseHelper* pool) {
  // Only allow one timer to run at a time.
  if (BackupJobTimerIsRunning())
    return;

  // Unretained here is okay because |backup_job_timer_| is
  // automatically cancelled when it's destroyed.
  backup_job_timer_.Start(
      FROM_HERE, pool->ConnectRetryInterval(),
      base::Bind(&Group::OnBackupJobTimerFired, base::Unretained(this),
                 group_name, pool));
}

bool ClientSocketPoolBaseHelper::Group::BackupJobTimerIsRunning() const {
  return backup_job_timer_.IsRunning();
}

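// Claims one of the ConnectJobs started by a preconnect (not yet tied to any
// request) for an incoming request.  Returns false if none are available.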
bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() {
  SanityCheck();

  if (unassigned_job_count_ == 0)
    return false;
  --unassigned_job_count_;
  return true;
}

void ClientSocketPoolBaseHelper::Group::AddJob(scoped_ptr<ConnectJob> job,
                                               bool is_preconnect) {
  SanityCheck();

  if (is_preconnect)
    ++unassigned_job_count_;
  jobs_.insert(job.release());
}

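// Deletes |job| and drops it from the job set, clamping the unassigned job
// count to the remaining number of jobs.  Stops the backup timer once the
// group has no jobs left.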
void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) {
  scoped_ptr<ConnectJob> owned_job(job);
  SanityCheck();

  std::set<ConnectJob*>::iterator it = jobs_.find(job);
  if (it != jobs_.end()) {
    jobs_.erase(it);
  } else {
    NOTREACHED();
  }
  size_t job_count = jobs_.size();
  if (job_count < unassigned_job_count_)
    unassigned_job_count_ = job_count;

  // If we've got no more jobs for this group, then we no longer need a
  // backup job either.
  if (jobs_.empty())
    backup_job_timer_.Stop();
}

void ClientSocketPoolBaseHelper::Group::OnBackupJobTimerFired(
    std::string group_name,
    ClientSocketPoolBaseHelper* pool) {
  // If there are no more jobs pending, there is no work to do.
  // If we've done our cleanups correctly, this should not happen.
  if (jobs_.empty()) {
    NOTREACHED();
    return;
  }

  // If our old job is waiting on DNS, or if we can't create any sockets
  // right now due to limits, just reset the timer.
  if (pool->ReachedMaxSocketsLimit() ||
      !HasAvailableSocketSlot(pool->max_sockets_per_group_) ||
      (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
    StartBackupJobTimer(group_name, pool);
    return;
  }

  if (pending_requests_.empty())
    return;

  scoped_ptr<ConnectJob> backup_job =
      pool->connect_job_factory_->NewConnectJob(
          group_name, *pending_requests_.FirstMax().value(), pool);
  backup_job->net_log().AddEvent(NetLog::TYPE_BACKUP_CONNECT_JOB_CREATED);
  SIMPLE_STATS_COUNTER("socket.backup_created");
  int rv = backup_job->Connect();
  pool->connecting_socket_count_++;
  ConnectJob* raw_backup_job = backup_job.get();
  AddJob(backup_job.Pass(), false);
  if (rv != ERR_IO_PENDING)
    pool->OnConnectJobComplete(rv, raw_backup_job);
}

void ClientSocketPoolBaseHelper::Group::SanityCheck() {
  DCHECK_LE(unassigned_job_count_, jobs_.size());
}

void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() {
  SanityCheck();

  // Delete active jobs.
  STLDeleteElements(&jobs_);
  unassigned_job_count_ = 0;

  // Stop backup job timer.
  backup_job_timer_.Stop();
}

const ClientSocketPoolBaseHelper::Request*
ClientSocketPoolBaseHelper::Group::GetNextPendingRequest() const {
  return
      pending_requests_.empty() ? NULL : pending_requests_.FirstMax().value();
}

bool ClientSocketPoolBaseHelper::Group::HasConnectJobForHandle(
    const ClientSocketHandle* handle) const {
  // Search the first |jobs_.size()| pending requests for |handle|.
  // If it's farther back in the deque than that, it doesn't have a
  // corresponding ConnectJob.
  size_t i = 0;
  for (RequestQueue::Pointer pointer = pending_requests_.FirstMax();
       !pointer.is_null() && i < jobs_.size();
       pointer = pending_requests_.GetNextTowardsLastMin(pointer), ++i) {
    if (pointer.value()->handle() == handle)
      return true;
  }
  return false;
}

void ClientSocketPoolBaseHelper::Group::InsertPendingRequest(
    scoped_ptr<const Request> request) {
  // This value must be cached before we release |request|.
  RequestPriority priority = request->priority();
  if (request->ignore_limits()) {
    // Put requests with ignore_limits == true (which should have
    // priority == MAXIMUM_PRIORITY) ahead of other requests with
    // MAXIMUM_PRIORITY.
    DCHECK_EQ(priority, MAXIMUM_PRIORITY);
    pending_requests_.InsertAtFront(request.release(), priority);
  } else {
    pending_requests_.Insert(request.release(), priority);
  }
}

scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::PopNextPendingRequest() {
  if (pending_requests_.empty())
    return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
  return RemovePendingRequest(pending_requests_.FirstMax());
}

scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::FindAndRemovePendingRequest(
    ClientSocketHandle* handle) {
  for (RequestQueue::Pointer pointer = pending_requests_.FirstMax();
       !pointer.is_null();
       pointer = pending_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->handle() == handle) {
      scoped_ptr<const Request> request = RemovePendingRequest(pointer);
      return request.Pass();
    }
  }
  return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
}

scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::RemovePendingRequest(
    const RequestQueue::Pointer& pointer) {
  scoped_ptr<const Request> request(pointer.value());
  pending_requests_.Erase(pointer);
  // If there are no more requests, kill the backup timer.
  if (pending_requests_.empty())
    backup_job_timer_.Stop();
  return request.Pass();
}

}  // namespace internal

}  // namespace net