url_request_http_job.cc revision 7d4cd473f85ac64c3747c96c277f9e506a0d2246
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/url_request/url_request_http_job.h"
6
7#include "base/base_switches.h"
8#include "base/bind.h"
9#include "base/bind_helpers.h"
10#include "base/command_line.h"
11#include "base/compiler_specific.h"
12#include "base/file_version_info.h"
13#include "base/message_loop.h"
14#include "base/metrics/field_trial.h"
15#include "base/metrics/histogram.h"
16#include "base/rand_util.h"
17#include "base/strings/string_util.h"
18#include "base/time.h"
19#include "net/base/filter.h"
20#include "net/base/host_port_pair.h"
21#include "net/base/load_flags.h"
22#include "net/base/mime_util.h"
23#include "net/base/net_errors.h"
24#include "net/base/net_util.h"
25#include "net/base/network_delegate.h"
26#include "net/base/sdch_manager.h"
27#include "net/cert/cert_status_flags.h"
28#include "net/cookies/cookie_monster.h"
29#include "net/http/http_network_session.h"
30#include "net/http/http_request_headers.h"
31#include "net/http/http_response_headers.h"
32#include "net/http/http_response_info.h"
33#include "net/http/http_status_code.h"
34#include "net/http/http_transaction.h"
35#include "net/http/http_transaction_delegate.h"
36#include "net/http/http_transaction_factory.h"
37#include "net/http/http_util.h"
38#include "net/ssl/ssl_cert_request_info.h"
39#include "net/ssl/ssl_config_service.h"
40#include "net/url_request/fraudulent_certificate_reporter.h"
41#include "net/url_request/http_user_agent_settings.h"
42#include "net/url_request/url_request.h"
43#include "net/url_request/url_request_context.h"
44#include "net/url_request/url_request_error_job.h"
45#include "net/url_request/url_request_job_factory.h"
46#include "net/url_request/url_request_redirect_job.h"
47#include "net/url_request/url_request_throttler_header_adapter.h"
48#include "net/url_request/url_request_throttler_manager.h"
49
// Request header used to advertise the client's locally available SDCH
// dictionaries to the server (see AddExtraHeaders()).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
51
52namespace net {
53
// Exposes the state of a URLRequestHttpJob (MIME type, URL, request time,
// SDCH status, packet statistics, ...) to the content-decoding Filter
// machinery via the FilterContext interface.  Non-owning: |job_| must
// outlive this context.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;  // Non-owning; the job owns this context.

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};
79
80class URLRequestHttpJob::HttpTransactionDelegateImpl
81    : public HttpTransactionDelegate {
82 public:
83  HttpTransactionDelegateImpl(
84      URLRequest* request, NetworkDelegate* network_delegate)
85      : request_(request),
86        network_delegate_(network_delegate),
87        cache_active_(false),
88        network_active_(false) {
89  }
90  virtual ~HttpTransactionDelegateImpl() {
91    OnDetachRequest();
92  }
93  void OnDetachRequest() {
94    if (request_ == NULL || network_delegate_ == NULL)
95      return;
96    network_delegate_->NotifyRequestWaitStateChange(
97        *request_,
98        NetworkDelegate::REQUEST_WAIT_STATE_RESET);
99    cache_active_ = false;
100    network_active_ = false;
101    request_ = NULL;
102  }
103  virtual void OnCacheActionStart() OVERRIDE {
104    if (request_ == NULL || network_delegate_ == NULL)
105      return;
106    DCHECK(!cache_active_ && !network_active_);
107    cache_active_ = true;
108    network_delegate_->NotifyRequestWaitStateChange(
109        *request_,
110        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
111  }
112  virtual void OnCacheActionFinish() OVERRIDE {
113    if (request_ == NULL || network_delegate_ == NULL)
114      return;
115    DCHECK(cache_active_ && !network_active_);
116    cache_active_ = false;
117    network_delegate_->NotifyRequestWaitStateChange(
118        *request_,
119        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
120  }
121  virtual void OnNetworkActionStart() OVERRIDE {
122    if (request_ == NULL || network_delegate_ == NULL)
123      return;
124    DCHECK(!cache_active_ && !network_active_);
125    network_active_ = true;
126    network_delegate_->NotifyRequestWaitStateChange(
127        *request_,
128        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
129  }
130  virtual void OnNetworkActionFinish() OVERRIDE {
131    if (request_ == NULL || network_delegate_ == NULL)
132      return;
133    DCHECK(!cache_active_ && network_active_);
134    network_active_ = false;
135    network_delegate_->NotifyRequestWaitStateChange(
136        *request_,
137        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
138  }
139 private:
140  URLRequest* request_;
141  NetworkDelegate* network_delegate_;
142  bool cache_active_;
143  bool network_active_;
144};
145
146URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
147    : job_(job) {
148  DCHECK(job_);
149}
150
// Nothing to release: |job_| is not owned.
URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}
153
// Delegates to the job, which derives the MIME type from the response.
bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}
158
159bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
160  if (!job_->request())
161    return false;
162  *gurl = job_->request()->url();
163  return true;
164}
165
166base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
167  return job_->request() ? job_->request()->request_time() : base::Time();
168}
169
// True when the response was served from the cache (snapshotted by the job
// in NotifyHeadersComplete).
bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}
173
// True when the request was flagged as a download via its load flags.
bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}
177
// Clears the job's record that an SDCH dictionary was advertised, so the
// response is no longer treated as SDCH-encoded.  Only legal to call when a
// dictionary actually was advertised (hence the DCHECK).
void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}
182
// True when the outgoing request advertised an SDCH dictionary, i.e. the
// response may be SDCH-encoded.
bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}
186
// Number of raw (pre-filter) bytes the job has read so far.
int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}
190
// HTTP status code of the response, as reported by the job.
int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}
194
// Forwards packet-timing histogram recording to the job.
void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}
199
200// TODO(darin): make sure the port blocking code is not lost
201// static
202URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
203                                          NetworkDelegate* network_delegate,
204                                          const std::string& scheme) {
205  DCHECK(scheme == "http" || scheme == "https");
206
207  if (!request->context()->http_transaction_factory()) {
208    NOTREACHED() << "requires a valid context";
209    return new URLRequestErrorJob(
210        request, network_delegate, ERR_INVALID_ARGUMENT);
211  }
212
213  GURL redirect_url;
214  if (request->GetHSTSRedirect(&redirect_url)) {
215    return new URLRequestRedirectJob(
216        request, network_delegate, redirect_url,
217        // Use status code 307 to preserve the method, so POST requests work.
218        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
219  }
220  return new URLRequestHttpJob(request,
221                               network_delegate,
222                               request->context()->http_user_agent_settings());
223}
224
// The bound callbacks use base::Unretained(this); they are only handed to
// objects owned by this job (notably the transaction), so they should not
// outlive it — NOTE(review): relies on that ownership invariant.
URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      weak_factory_(this),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_transaction_delegate_(
          new HttpTransactionDelegateImpl(request, network_delegate)),
      http_user_agent_settings_(http_user_agent_settings) {
  // Register with the throttler (if the context has one) so that repeated
  // failures to this URL can be subjected to exponential back-off.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}
267
URLRequestHttpJob::~URLRequestHttpJob() {
  // Destroying the job while still waiting on a delegate callback would let
  // that callback fire on a dead object.
  CHECK(!awaiting_callback_);

  // At most one arm of the SDCH latency experiment can be active.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}
298
299void URLRequestHttpJob::SetPriority(RequestPriority priority) {
300  priority_ = priority;
301  if (transaction_)
302    transaction_->SetPriority(priority_);
303}
304
// Builds request_info_ from the URLRequest (URL, method, flags, referrer,
// user agent, privacy mode), then kicks off the cookie-load/start sequence.
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.request_id = request_->identifier();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      kPrivacyModeEnabled : kPrivacyModeDisabled;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer.  See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  // Fall back to an empty User-Agent when no settings object was supplied.
  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent(request_->url()) :
          EmptyString());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}
348
// Cancels the job: detaches the transaction delegate, then (if a transaction
// exists) cancels pending weak callbacks and tears the transaction down
// before letting the base class finish the kill.
void URLRequestHttpJob::Kill() {
  http_transaction_delegate_->OnDetachRequest();

  if (!transaction_.get())
    return;

  // Invalidate weak pointers first so no queued cookie/start callbacks can
  // run against the destroyed transaction.
  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}
359
// Post-header bookkeeping: snapshots cache state, feeds the throttler,
// processes HSTS/HPKP headers, notes any advertised SDCH dictionary, and
// either restarts for auth or reports header completion upward.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  // Only genuine network responses should influence back-off state.
  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}
414
// Records final timing/SDCH statistics before the base class delivers the
// completion notification.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}
419
// Tears down the transaction after recording ABORTED stats.  response_info_
// was obtained from the transaction (see NotifyHeadersComplete), so it must
// be cleared once the transaction is gone.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}
428
429void URLRequestHttpJob::StartTransaction() {
430  if (network_delegate()) {
431    int rv = network_delegate()->NotifyBeforeSendHeaders(
432        request_, notify_before_headers_sent_callback_,
433        &request_info_.extra_headers);
434    // If an extension blocks the request, we rely on the callback to
435    // MaybeStartTransactionInternal().
436    if (rv == ERR_IO_PENDING) {
437      SetBlockedOnDelegate();
438      return;
439    }
440    MaybeStartTransactionInternal(rv);
441    return;
442  }
443  StartTransactionInternal();
444}
445
// Invoked when the NetworkDelegate answers an asynchronous
// NotifyBeforeSendHeaders; clears the blocked state and resumes the start.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  SetUnblockedOnDelegate();

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}
454
455void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
456  if (result == OK) {
457    StartTransactionInternal();
458  } else {
459    std::string source("delegate");
460    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
461                                 NetLog::StringCallback("source", &source));
462    NotifyCanceled();
463    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
464  }
465}
466
// Creates (or restarts, when retrying with auth) the HTTP transaction and
// starts it, honoring the throttler's back-off decision.  A synchronous
// result is re-dispatched through the message loop so the URLRequest
// delegate is always notified asynchronously.
void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    // Credentials are single-use; clear them after handing them off.
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_, http_transaction_delegate_.get());
    if (rv == OK) {
      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}
511
// Fills in Accept-Encoding (advertising SDCH dictionaries when available,
// and possibly enrolling this request in the SDCH latency experiment) and a
// default Accept-Language, without overriding caller-provided values.
void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet.  This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers.  Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response.  When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH.  Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}
588
// Loads cookies for the request (asynchronously when a CookieMonster is
// available) and then starts the transaction via DoStartTransaction().
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      // Fetch the full cookie list first so CheckCookiePolicyAndLoad can run
      // the policy check against the individual cookies.
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      // No CookieMonster backing: run the policy check with an empty list.
      CheckCookiePolicyAndLoad(CookieList());
    }
  } else {
    // Cookies are disabled for this request; start immediately.
    DoStartTransaction();
  }
}
613
614void URLRequestHttpJob::DoLoadCookies() {
615  CookieOptions options;
616  options.set_include_httponly();
617  request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
618      request_->url(), options,
619      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
620                 weak_factory_.GetWeakPtr()));
621}
622
623void URLRequestHttpJob::CheckCookiePolicyAndLoad(
624    const CookieList& cookie_list) {
625  if (CanGetCookies(cookie_list))
626    DoLoadCookies();
627  else
628    DoStartTransaction();
629}
630
631void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
632  if (!cookie_line.empty()) {
633    request_info_.extra_headers.SetHeader(
634        HttpRequestHeaders::kCookie, cookie_line);
635    // Disable privacy mode as we are sending cookies anyway.
636    request_info_.privacy_mode = kPrivacyModeDisabled;
637  }
638  DoStartTransaction();
639}
640
641void URLRequestHttpJob::DoStartTransaction() {
642  // We may have been canceled while retrieving cookies.
643  if (GetStatus().is_success()) {
644    StartTransaction();
645  } else {
646    NotifyCanceled();
647  }
648}
649
// Entry point after the delegate has approved the received headers: collects
// the response's Set-Cookie values and kicks off the (possibly asynchronous)
// per-cookie save loop, which ends in NotifyHeadersComplete().
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  if (result != net::OK) {
    // The delegate rejected the headers; surface that as a start error.
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // The server's Date header is used as the reference time for cookie
  // expiry; fall back to a null Time when absent or unparsable.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}
675
// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  // Only attempt saves when allowed by load flags, a cookie store exists,
  // and there are cookies left to process.
  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // All cookies handled synchronously (or none to save): finish up now.
  // Otherwise OnCookieSaved resumes the loop when the async save completes.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}
731
// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.  SaveNextCookie's own loop is
  // still running and will pick up the next cookie itself.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}
758
759void URLRequestHttpJob::FetchResponseCookies(
760    std::vector<std::string>* cookies) {
761  const std::string name = "Set-Cookie";
762  std::string value;
763
764  void* iter = NULL;
765  HttpResponseHeaders* headers = GetResponseHeaders();
766  while (headers->EnumerateHeader(&iter, name, &value)) {
767    if (!value.empty())
768      cookies->push_back(value);
769  }
770}
771
772// NOTE: |ProcessStrictTransportSecurityHeader| and
773// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
774void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
775  DCHECK(response_info_);
776  TransportSecurityState* security_state =
777      request_->context()->transport_security_state();
778  const SSLInfo& ssl_info = response_info_->ssl_info;
779
780  // Only accept HSTS headers on HTTPS connections that have no
781  // certificate errors.
782  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
783      !security_state)
784    return;
785
786  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
787  //
788  //   If a UA receives more than one STS header field in a HTTP response
789  //   message over secure transport, then the UA MUST process only the
790  //   first such header field.
791  HttpResponseHeaders* headers = GetResponseHeaders();
792  std::string value;
793  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
794    security_state->AddHSTSHeader(request_info_.url.host(), value);
795}
796
797void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
798  DCHECK(response_info_);
799  TransportSecurityState* security_state =
800      request_->context()->transport_security_state();
801  const SSLInfo& ssl_info = response_info_->ssl_info;
802
803  // Only accept HPKP headers on HTTPS connections that have no
804  // certificate errors.
805  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
806      !security_state)
807    return;
808
809  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
810  //
811  //   If a UA receives more than one PKP header field in an HTTP
812  //   response message over secure transport, then the UA MUST process
813  //   only the first such header field.
814  HttpResponseHeaders* headers = GetResponseHeaders();
815  std::string value;
816  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
817    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
818}
819
820void URLRequestHttpJob::OnStartCompleted(int result) {
821  RecordTimer();
822
823  // If the request was destroyed, then there is no more work to do.
824  if (!request_)
825    return;
826
827  // If the transaction was destroyed, then the job was cancelled, and
828  // we can just ignore this notification.
829  if (!transaction_.get())
830    return;
831
832  receive_headers_end_ = base::TimeTicks::Now();
833
834  // Clear the IO_PENDING status
835  SetStatus(URLRequestStatus());
836
837  const URLRequestContext* context = request_->context();
838
839  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
840      transaction_->GetResponseInfo() != NULL) {
841    FraudulentCertificateReporter* reporter =
842      context->fraudulent_certificate_reporter();
843    if (reporter != NULL) {
844      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
845      bool sni_available = SSLConfigService::IsSNIAvailable(
846          context->ssl_config_service());
847      const std::string& host = request_->url().host();
848
849      reporter->SendReport(host, ssl_info, sni_available);
850    }
851  }
852
853  if (result == OK) {
854    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
855    if (network_delegate()) {
856      // Note that |this| may not be deleted until
857      // |on_headers_received_callback_| or
858      // |NetworkDelegate::URLRequestDestroyed()| has been called.
859      int error = network_delegate()->NotifyHeadersReceived(
860          request_,
861          on_headers_received_callback_,
862          headers.get(),
863          &override_response_headers_);
864      if (error != net::OK) {
865        if (error == net::ERR_IO_PENDING) {
866          awaiting_callback_ = true;
867          SetBlockedOnDelegate();
868        } else {
869          std::string source("delegate");
870          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
871                                       NetLog::StringCallback("source",
872                                                              &source));
873          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
874        }
875        return;
876      }
877    }
878
879    SaveCookiesAndNotifyHeadersComplete(net::OK);
880  } else if (IsCertificateError(result)) {
881    // We encountered an SSL certificate error.  Ask our delegate to decide
882    // what we should do.
883
884    TransportSecurityState::DomainState domain_state;
885    const URLRequestContext* context = request_->context();
886    const bool fatal = context->transport_security_state() &&
887        context->transport_security_state()->GetDomainState(
888            request_info_.url.host(),
889            SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
890            &domain_state) &&
891        domain_state.ShouldSSLErrorsBeFatal();
892    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
893  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
894    NotifyCertificateRequested(
895        transaction_->GetResponseInfo()->cert_request_info.get());
896  } else {
897    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
898  }
899}
900
901void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
902  SetUnblockedOnDelegate();
903  awaiting_callback_ = false;
904
905  // Check that there are no callbacks to already canceled requests.
906  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
907
908  SaveCookiesAndNotifyHeadersComplete(result);
909}
910
911void URLRequestHttpJob::OnReadCompleted(int result) {
912  read_in_progress_ = false;
913
914  if (ShouldFixMismatchedContentLength(result))
915    result = OK;
916
917  if (result == OK) {
918    NotifyDone(URLRequestStatus());
919  } else if (result < 0) {
920    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
921  } else {
922    // Clear the IO_PENDING status
923    SetStatus(URLRequestStatus());
924  }
925
926  NotifyReadComplete(result);
927}
928
929void URLRequestHttpJob::RestartTransactionWithAuth(
930    const AuthCredentials& credentials) {
931  auth_credentials_ = credentials;
932
933  // These will be reset in OnStartCompleted.
934  response_info_ = NULL;
935  receive_headers_end_ = base::TimeTicks();
936  response_cookies_.clear();
937
938  ResetTimer();
939
940  // Update the cookies, since the cookie store may have been updated from the
941  // headers in the 401/407. Since cookies were already appended to
942  // extra_headers, we need to strip them out before adding them again.
943  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);
944
945  AddCookieHeaderAndStart();
946}
947
// Sets the request body stream.  Must be called before the transaction is
// started; |upload| is stored by pointer (ownership not shown here).
void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}
952
// Copies |headers| into the pending request info.  Must be called before
// the transaction is started.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}
958
959LoadState URLRequestHttpJob::GetLoadState() const {
960  return transaction_.get() ?
961      transaction_->GetLoadState() : LOAD_STATE_IDLE;
962}
963
964UploadProgress URLRequestHttpJob::GetUploadProgress() const {
965  return transaction_.get() ?
966      transaction_->GetUploadProgress() : UploadProgress();
967}
968
969bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
970  DCHECK(transaction_.get());
971
972  if (!response_info_)
973    return false;
974
975  return GetResponseHeaders()->GetMimeType(mime_type);
976}
977
978bool URLRequestHttpJob::GetCharset(std::string* charset) {
979  DCHECK(transaction_.get());
980
981  if (!response_info_)
982    return false;
983
984  return GetResponseHeaders()->GetCharset(charset);
985}
986
987void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
988  DCHECK(request_);
989  DCHECK(transaction_.get());
990
991  if (response_info_) {
992    *info = *response_info_;
993    if (override_response_headers_.get())
994      info->headers = override_response_headers_;
995  }
996}
997
998void URLRequestHttpJob::GetLoadTimingInfo(
999    LoadTimingInfo* load_timing_info) const {
1000  // If haven't made it far enough to receive any headers, don't return
1001  // anything.  This makes for more consistent behavior in the case of errors.
1002  if (!transaction_ || receive_headers_end_.is_null())
1003    return;
1004  if (transaction_->GetLoadTimingInfo(load_timing_info))
1005    load_timing_info->receive_headers_end = receive_headers_end_;
1006}
1007
1008bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
1009  DCHECK(transaction_.get());
1010
1011  if (!response_info_)
1012    return false;
1013
1014  // TODO(darin): Why are we extracting response cookies again?  Perhaps we
1015  // should just leverage response_cookies_.
1016
1017  cookies->clear();
1018  FetchResponseCookies(cookies);
1019  return true;
1020}
1021
1022int URLRequestHttpJob::GetResponseCode() const {
1023  DCHECK(transaction_.get());
1024
1025  if (!response_info_)
1026    return -1;
1027
1028  return GetResponseHeaders()->response_code();
1029}
1030
1031Filter* URLRequestHttpJob::SetupFilter() const {
1032  DCHECK(transaction_.get());
1033  if (!response_info_)
1034    return NULL;
1035
1036  std::vector<Filter::FilterType> encoding_types;
1037  std::string encoding_type;
1038  HttpResponseHeaders* headers = GetResponseHeaders();
1039  void* iter = NULL;
1040  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
1041    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
1042  }
1043
1044  if (filter_context_->IsSdchResponse()) {
1045    // We are wary of proxies that discard or damage SDCH encoding.  If a server
1046    // explicitly states that this is not SDCH content, then we can correct our
1047    // assumption that this is an SDCH response, and avoid the need to recover
1048    // as though the content is corrupted (when we discover it is not SDCH
1049    // encoded).
1050    std::string sdch_response_status;
1051    iter = NULL;
1052    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
1053                                    &sdch_response_status)) {
1054      if (sdch_response_status == "0") {
1055        filter_context_->ResetSdchResponseToFalse();
1056        break;
1057      }
1058    }
1059  }
1060
1061  // Even if encoding types are empty, there is a chance that we need to add
1062  // some decoding, as some proxies strip encoding completely. In such cases,
1063  // we may need to add (for example) SDCH filtering (when the context suggests
1064  // it is appropriate).
1065  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);
1066
1067  return !encoding_types.empty()
1068      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
1069}
1070
1071bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
1072  // HTTP is always safe.
1073  // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
1074  if (location.is_valid() &&
1075      (location.scheme() == "http" || location.scheme() == "https")) {
1076    return true;
1077  }
1078  // Query URLRequestJobFactory as to whether |location| would be safe to
1079  // redirect to.
1080  return request_->context()->job_factory() &&
1081      request_->context()->job_factory()->IsSafeRedirectTarget(location);
1082}
1083
1084bool URLRequestHttpJob::NeedsAuth() {
1085  int code = GetResponseCode();
1086  if (code == -1)
1087    return false;
1088
1089  // Check if we need either Proxy or WWW Authentication.  This could happen
1090  // because we either provided no auth info, or provided incorrect info.
1091  switch (code) {
1092    case 407:
1093      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
1094        return false;
1095      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
1096      return true;
1097    case 401:
1098      if (server_auth_state_ == AUTH_STATE_CANCELED)
1099        return false;
1100      server_auth_state_ = AUTH_STATE_NEED_AUTH;
1101      return true;
1102  }
1103  return false;
1104}
1105
// Returns the auth challenge of the current response.  Only meaningful
// after NeedsAuth() returned true, i.e. after a 401 or 407 was received.
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}
1120
1121void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
1122  DCHECK(transaction_.get());
1123
1124  // Proxy gets set first, then WWW.
1125  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
1126    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
1127  } else {
1128    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
1129    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
1130  }
1131
1132  RestartTransactionWithAuth(credentials);
1133}
1134
1135void URLRequestHttpJob::CancelAuth() {
1136  // Proxy gets set first, then WWW.
1137  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
1138    proxy_auth_state_ = AUTH_STATE_CANCELED;
1139  } else {
1140    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
1141    server_auth_state_ = AUTH_STATE_CANCELED;
1142  }
1143
1144  // These will be reset in OnStartCompleted.
1145  response_info_ = NULL;
1146  receive_headers_end_ = base::TimeTicks::Now();
1147  response_cookies_.clear();
1148
1149  ResetTimer();
1150
1151  // OK, let the consumer read the error page...
1152  //
1153  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
1154  // which will cause the consumer to receive OnResponseStarted instead of
1155  // OnAuthRequired.
1156  //
1157  // We have to do this via InvokeLater to avoid "recursing" the consumer.
1158  //
1159  base::MessageLoop::current()->PostTask(
1160      FROM_HERE,
1161      base::Bind(&URLRequestHttpJob::OnStartCompleted,
1162                 weak_factory_.GetWeakPtr(), OK));
1163}
1164
1165void URLRequestHttpJob::ContinueWithCertificate(
1166    X509Certificate* client_cert) {
1167  DCHECK(transaction_.get());
1168
1169  DCHECK(!response_info_) << "should not have a response yet";
1170  receive_headers_end_ = base::TimeTicks();
1171
1172  ResetTimer();
1173
1174  // No matter what, we want to report our status as IO pending since we will
1175  // be notifying our consumer asynchronously via OnStartCompleted.
1176  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
1177
1178  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
1179  if (rv == ERR_IO_PENDING)
1180    return;
1181
1182  // The transaction started synchronously, but we need to notify the
1183  // URLRequest delegate via the message loop.
1184  base::MessageLoop::current()->PostTask(
1185      FROM_HERE,
1186      base::Bind(&URLRequestHttpJob::OnStartCompleted,
1187                 weak_factory_.GetWeakPtr(), rv));
1188}
1189
1190void URLRequestHttpJob::ContinueDespiteLastError() {
1191  // If the transaction was destroyed, then the job was cancelled.
1192  if (!transaction_.get())
1193    return;
1194
1195  DCHECK(!response_info_) << "should not have a response yet";
1196  receive_headers_end_ = base::TimeTicks();
1197
1198  ResetTimer();
1199
1200  // No matter what, we want to report our status as IO pending since we will
1201  // be notifying our consumer asynchronously via OnStartCompleted.
1202  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
1203
1204  int rv = transaction_->RestartIgnoringLastError(start_callback_);
1205  if (rv == ERR_IO_PENDING)
1206    return;
1207
1208  // The transaction started synchronously, but we need to notify the
1209  // URLRequest delegate via the message loop.
1210  base::MessageLoop::current()->PostTask(
1211      FROM_HERE,
1212      base::Bind(&URLRequestHttpJob::OnStartCompleted,
1213                 weak_factory_.GetWeakPtr(), rv));
1214}
1215
1216bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
1217  // Some servers send the body compressed, but specify the content length as
1218  // the uncompressed size.  Although this violates the HTTP spec we want to
1219  // support it (as IE and FireFox do), but *only* for an exact match.
1220  // See http://crbug.com/79694.
1221  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
1222      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
1223    if (request_ && request_->response_headers()) {
1224      int64 expected_length = request_->response_headers()->GetContentLength();
1225      VLOG(1) << __FUNCTION__ << "() "
1226              << "\"" << request_->url().spec() << "\""
1227              << " content-length = " << expected_length
1228              << " pre total = " << prefilter_bytes_read()
1229              << " post total = " << postfilter_bytes_read();
1230      if (postfilter_bytes_read() == expected_length) {
1231        // Clear the error.
1232        return true;
1233      }
1234    }
1235  }
1236  return false;
1237}
1238
// Reads up to |buf_size| bytes of raw (pre-filter) response body into |buf|.
// Returns true on synchronous completion with |*bytes_read| set (0 meaning
// EOF); returns false when the read is pending (OnReadCompleted will fire)
// or has failed.
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // A forgivable content-length mismatch at EOF is mapped to a clean EOF.
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    // Synchronous success; rv == 0 is end-of-stream.
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    // Asynchronous read; completion arrives in OnReadCompleted.
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}
1268
1269void URLRequestHttpJob::StopCaching() {
1270  if (transaction_.get())
1271    transaction_->StopCaching();
1272}
1273
1274void URLRequestHttpJob::DoneReading() {
1275  if (transaction_.get())
1276    transaction_->DoneReading();
1277  DoneWithRequest(FINISHED);
1278}
1279
1280HostPortPair URLRequestHttpJob::GetSocketAddress() const {
1281  return response_info_ ? response_info_->socket_address : HostPortPair();
1282}
1283
1284void URLRequestHttpJob::RecordTimer() {
1285  if (request_creation_time_.is_null()) {
1286    NOTREACHED()
1287        << "The same transaction shouldn't start twice without new timing.";
1288    return;
1289  }
1290
1291  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
1292  request_creation_time_ = base::Time();
1293
1294  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
1295}
1296
1297void URLRequestHttpJob::ResetTimer() {
1298  if (!request_creation_time_.is_null()) {
1299    NOTREACHED()
1300        << "The timer was reset before it was recorded.";
1301    return;
1302  }
1303  request_creation_time_ = base::Time::Now();
1304}
1305
1306void URLRequestHttpJob::UpdatePacketReadTimes() {
1307  if (!packet_timing_enabled_)
1308    return;
1309
1310  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
1311    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
1312    return;  // No new bytes have arrived.
1313  }
1314
1315  final_packet_time_ = base::Time::Now();
1316  if (!bytes_observed_in_packets_)
1317    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();
1318
1319  bytes_observed_in_packets_ = filter_input_byte_count();
1320}
1321
// Records SDCH-related packet statistics into UMA histograms, selected by
// |statistic|.  No-op unless packet timing was enabled and at least one
// packet was observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  // Elapsed time from request issuance to the last observed packet.
  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}
1359
// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

// Records UMA histograms describing whether (and how well) this response
// was compressed, bucketed by transport class (SSL / proxy / direct).
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||                // Don't record cached content
      !GetStatus().is_success() ||         // Don't record failed content
      !IsCompressibleContent() ||          // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible.  Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  // Bytes on the wire vs bytes after any decoding filter ran.
  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties.  So, for each request, we'll put it into one of 3
  // groups:
  //      a) SSL resources
  //         Proxies cannot tamper with compression headers with SSL.
  //      b) Non-SSL, loaded-via-proxy resources
  //         In this case, we know a proxy might have interfered.
  //      c) Non-SSL, loaded-without-proxy resources
  //         In this case, we know there was no explicit proxy.  However,
  //         it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}
1434
1435bool URLRequestHttpJob::IsCompressibleContent() const {
1436  std::string mime_type;
1437  return GetMimeType(&mime_type) &&
1438      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
1439       IsSupportedNonImageMimeType(mime_type.c_str()));
1440}
1441
// Records total-job-time UMA histograms, split by completion cause and by
// whether the response came from the cache.  Consumes |start_time_|.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else  {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  // Null out the start time so repeated calls record nothing further.
  start_time_ = base::TimeTicks();
}
1465
1466void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
1467  if (done_)
1468    return;
1469  done_ = true;
1470  RecordPerfHistograms(reason);
1471  if (reason == FINISHED) {
1472    request_->set_received_response_content_length(prefilter_bytes_read());
1473    RecordCompressionHistograms();
1474  }
1475}
1476
1477HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
1478  DCHECK(transaction_.get());
1479  DCHECK(transaction_->GetResponseInfo());
1480  return override_response_headers_.get() ?
1481             override_response_headers_.get() :
1482             transaction_->GetResponseInfo()->headers.get();
1483}
1484
// Called when the owning URLRequest is going away: any delegate callback
// still outstanding must no longer be expected.
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}
1488
// Forwards the detach notification to the HTTP transaction delegate.
void URLRequestHttpJob::OnDetachRequest() {
  http_transaction_delegate_->OnDetachRequest();
}
1492
1493}  // namespace net
1494