// url_request_http_job.cc at revision 5821806d5e7f356e8fa4b058a389a808ea183019
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/url_request/url_request_http_job.h"
6
7#include "base/base_switches.h"
8#include "base/bind.h"
9#include "base/bind_helpers.h"
10#include "base/command_line.h"
11#include "base/compiler_specific.h"
12#include "base/file_util.h"
13#include "base/file_version_info.h"
14#include "base/message_loop.h"
15#include "base/metrics/field_trial.h"
16#include "base/metrics/histogram.h"
17#include "base/rand_util.h"
18#include "base/string_util.h"
19#include "base/time.h"
20#include "net/base/cert_status_flags.h"
21#include "net/base/filter.h"
22#include "net/base/host_port_pair.h"
23#include "net/base/load_flags.h"
24#include "net/base/mime_util.h"
25#include "net/base/net_errors.h"
26#include "net/base/net_util.h"
27#include "net/base/network_delegate.h"
28#include "net/base/sdch_manager.h"
29#include "net/base/ssl_cert_request_info.h"
30#include "net/base/ssl_config_service.h"
31#include "net/cookies/cookie_monster.h"
32#include "net/http/http_network_session.h"
33#include "net/http/http_request_headers.h"
34#include "net/http/http_response_headers.h"
35#include "net/http/http_response_info.h"
36#include "net/http/http_status_code.h"
37#include "net/http/http_transaction.h"
38#include "net/http/http_transaction_delegate.h"
39#include "net/http/http_transaction_factory.h"
40#include "net/http/http_util.h"
41#include "net/url_request/fraudulent_certificate_reporter.h"
42#include "net/url_request/http_user_agent_settings.h"
43#include "net/url_request/url_request.h"
44#include "net/url_request/url_request_context.h"
45#include "net/url_request/url_request_error_job.h"
46#include "net/url_request/url_request_redirect_job.h"
47#include "net/url_request/url_request_throttler_header_adapter.h"
48#include "net/url_request/url_request_throttler_manager.h"
49
// Request header used to advertise the SDCH dictionaries already held by the
// client (attached in AddExtraHeaders()).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
51
52namespace net {
53
// Exposes the state of the owning URLRequestHttpJob to the content-decoding
// filter chain (gzip/deflate/SDCH) through the FilterContext interface.
// The context is owned by, and must not outlive, its |job_|.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;  // Owning job; never NULL (DCHECKed in the ctor).

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};
79
80class URLRequestHttpJob::HttpTransactionDelegateImpl
81    : public HttpTransactionDelegate {
82 public:
83  explicit HttpTransactionDelegateImpl(URLRequest* request)
84      : request_(request),
85        network_delegate_(request->context()->network_delegate()),
86        cache_active_(false),
87        network_active_(false) {
88  }
89  virtual ~HttpTransactionDelegateImpl() {
90    OnDetachRequest();
91  }
92  void OnDetachRequest() {
93    if (request_ == NULL || network_delegate_ == NULL)
94      return;
95    network_delegate_->NotifyRequestWaitStateChange(
96        *request_,
97        NetworkDelegate::REQUEST_WAIT_STATE_RESET);
98    cache_active_ = false;
99    network_active_ = false;
100    request_ = NULL;
101  }
102  virtual void OnCacheActionStart() OVERRIDE {
103    if (request_ == NULL || network_delegate_ == NULL)
104      return;
105    DCHECK(!cache_active_ && !network_active_);
106    cache_active_ = true;
107    network_delegate_->NotifyRequestWaitStateChange(
108        *request_,
109        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
110  }
111  virtual void OnCacheActionFinish() OVERRIDE {
112    if (request_ == NULL || network_delegate_ == NULL)
113      return;
114    DCHECK(cache_active_ && !network_active_);
115    cache_active_ = false;
116    network_delegate_->NotifyRequestWaitStateChange(
117        *request_,
118        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
119  }
120  virtual void OnNetworkActionStart() OVERRIDE {
121    if (request_ == NULL || network_delegate_ == NULL)
122      return;
123    DCHECK(!cache_active_ && !network_active_);
124    network_active_ = true;
125    network_delegate_->NotifyRequestWaitStateChange(
126        *request_,
127        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
128  }
129  virtual void OnNetworkActionFinish() OVERRIDE {
130    if (request_ == NULL || network_delegate_ == NULL)
131      return;
132    DCHECK(!cache_active_ && network_active_);
133    network_active_ = false;
134    network_delegate_->NotifyRequestWaitStateChange(
135        *request_,
136        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
137  }
138 private:
139  URLRequest* request_;
140  NetworkDelegate* network_delegate_;
141  bool cache_active_;
142  bool network_active_;
143};
144
URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);  // A filter context is meaningless without an owning job.
}
149
// |job_| is not owned, so there is nothing to release here.
URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}
152
// Forwards to the job's MIME-type accessor; returns false if none is known.
bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}
157
158bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
159  if (!job_->request())
160    return false;
161  *gurl = job_->request()->url();
162  return true;
163}
164
165base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
166  return job_->request() ? job_->request()->request_time() : base::Time();
167}
168
// |is_cached_content_| is latched in NotifyHeadersComplete() from
// |response_info_->was_cached|.
bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}
172
// A download is indicated by the LOAD_IS_DOWNLOAD bit in the request's
// load flags.
bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}
176
// Clears the "SDCH was advertised" flag so a response that turned out not to
// be SDCH-encoded (per an explicit HTTP header) stops being treated as such.
// Only valid while the flag is still set.
void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}
181
// True when the outgoing request advertised an SDCH dictionary (see
// AddExtraHeaders()), so the response is expected to be SDCH-encoded.
bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}
185
// Forwards the job's count of bytes fed into the filter so far.
int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}
189
// Forwards the HTTP response code reported by the job.
int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}
193
// Forwards packet-timing histogram recording to the job.
void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}
198
199// TODO(darin): make sure the port blocking code is not lost
200// static
// Creates the job used to service an http(s) |request|. Returns an error job
// when the context cannot perform HTTP at all, or a redirect job when HSTS
// state requires upgrading the URL before any network contact.
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  // Without a transaction factory no HTTP work is possible; fail the request
  // gracefully in release builds.
  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  // Honor Strict-Transport-Security: serve an internal redirect to the
  // rewritten URL instead of starting a plain-HTTP job.
  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url))
    return new URLRequestRedirectJob(request, network_delegate, redirect_url);
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}
219
220
// Constructs a job for |request|. |http_user_agent_settings| (may be NULL)
// supplies default Accept-Language/Accept-Charset values; callbacks into
// |this| are bound with base::Unretained because the job outlives the
// transaction that invokes them.
URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_(
          base::Bind(&URLRequestHttpJob::OnStartCompleted,
                     base::Unretained(this)))),
      ALLOW_THIS_IN_INITIALIZER_LIST(notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this)))),
      read_in_progress_(false),
      transaction_(NULL),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          filter_context_(new HttpFilterContext(this))),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
      ALLOW_THIS_IN_INITIALIZER_LIST(on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this)))),
      awaiting_callback_(false),
      http_transaction_delegate_(new HttpTransactionDelegateImpl(request)),
      http_user_agent_settings_(http_user_agent_settings) {
  // Register with the exponential back-off throttler when one is configured.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  // Starts the clock for request-timing histograms.
  ResetTimer();
}
264
// Called once response headers are available. Updates throttling state,
// processes security headers, records any advertised SDCH dictionary URL,
// and either restarts the transaction for auth or tells the base class the
// headers are done. May run several times per job due to auth restarts.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  // Only responses that actually came off the network feed the throttler.
  if (!is_cached_content_ && throttling_entry_) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}
319
// Records end-of-request bookkeeping before forwarding completion to the
// base class.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}
324
// Tears down the transaction mid-flight. |response_info_| points into the
// transaction, so it must be cleared alongside it.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
}
332
333void URLRequestHttpJob::StartTransaction() {
334  if (request_->context()->network_delegate()) {
335    int rv = request_->context()->network_delegate()->NotifyBeforeSendHeaders(
336        request_, notify_before_headers_sent_callback_,
337        &request_info_.extra_headers);
338    // If an extension blocks the request, we rely on the callback to
339    // MaybeStartTransactionInternal().
340    if (rv == ERR_IO_PENDING) {
341      SetBlockedOnDelegate();
342      return;
343    }
344    MaybeStartTransactionInternal(rv);
345    return;
346  }
347  StartTransactionInternal();
348}
349
// Invoked asynchronously by the network delegate once it has finished with
// OnBeforeSendHeaders. |result| is OK to proceed or a net error to abort.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  SetUnblockedOnDelegate();

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}
358
359void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
360  if (result == OK) {
361    StartTransactionInternal();
362  } else {
363    std::string source("delegate");
364    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
365                                 NetLog::StringCallback("source", &source));
366    NotifyCanceled();
367    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
368  }
369}
370
// Creates (or restarts, when auth credentials are pending) the HTTP
// transaction and starts it, honoring the URL throttler. A synchronous
// completion is re-dispatched through the message loop so the URLRequest
// delegate is always notified asynchronously.
void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (request_->context()->network_delegate()) {
    request_->context()->network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    // Restart path: consume the stored credentials exactly once.
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        &transaction_, http_transaction_delegate_.get());
    if (rv == OK) {
      if (!throttling_entry_ ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}
415
// Fills in default request headers: Accept-Encoding (including SDCH
// advertisement and the SDCH latency experiment buckets), plus default
// Accept-Language/Accept-Charset from |http_user_agent_settings_| when the
// caller did not set them explicitly.
void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet.  This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers.  Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response.  When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH.  Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language and Accept-Charset if the request
    // didn't have them specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
    std::string accept_charset = http_user_agent_settings_->GetAcceptCharset();
    if (!accept_charset.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptCharset,
          accept_charset);
    }
  }
}
498
// Loads cookies for the request URL (unless suppressed via load flags) and
// then starts the transaction. All continuations run asynchronously via
// weak-pointer-bound callbacks.
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      // With a CookieMonster the full cookie list is fetched first so the
      // delegate can veto it in CheckCookiePolicyAndLoad.
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      DoLoadCookies();
    }
  } else {
    DoStartTransaction();
  }
}
523
// Asynchronously fetches the cookie header line (including HttpOnly cookies)
// for the request URL; continues in OnCookiesLoaded.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  request_->context()->cookie_store()->GetCookiesWithInfoAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}
532
533void URLRequestHttpJob::CheckCookiePolicyAndLoad(
534    const CookieList& cookie_list) {
535  if (CanGetCookies(cookie_list))
536    DoLoadCookies();
537  else
538    DoStartTransaction();
539}
540
// Receives the serialized cookie header line; attaches it (when non-empty)
// to the outgoing request headers, then proceeds to start the transaction.
void URLRequestHttpJob::OnCookiesLoaded(
    const std::string& cookie_line,
    const std::vector<net::CookieStore::CookieInfo>& cookie_infos) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
  }
  DoStartTransaction();
}
550
551void URLRequestHttpJob::DoStartTransaction() {
552  // We may have been canceled while retrieving cookies.
553  if (GetStatus().is_success()) {
554    StartTransaction();
555  } else {
556    NotifyCanceled();
557  }
558}
559
// Entry point after headers are received (and the delegate approved them):
// extracts Set-Cookie values and kicks off the (possibly asynchronous)
// save loop in SaveNextCookie(). A non-OK |result| aborts the request.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // The server's Date header is used as the reference time for cookie
  // expiration; fall back to the null time when absent/unparsable.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}
585
586// If the save occurs synchronously, SaveNextCookie will loop and save the next
587// cookie. If the save is deferred, the callback is responsible for continuing
588// to iterate through the cookies.
589// TODO(erikwright): Modify the CookieStore API to indicate via return value
590// whether it completed synchronously or asynchronously.
591// See http://crbug.com/131066.
// Persists response cookies starting at |response_cookies_save_index_|.
// Saves run synchronously when the cookie store completes inline; otherwise
// OnCookieSaved resumes the loop. The two shared booleans let the callback
// and this function detect which of the two happened (see comments above
// OnCookieSaved). When all cookies are handled, headers are declared done.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  if (!callback_pending->data) {
    // Every cookie was handled synchronously (or there were none): finish up.
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}
641
642// |save_next_cookie_running| is true when the callback is bound and set to
643// false when SaveNextCookie exits, allowing the callback to determine if the
644// save occurred synchronously or asynchronously.
645// |callback_pending| is false when the callback is invoked and will be set to
646// true by the callback, allowing SaveNextCookie to detect whether the save
647// occurred synchronously.
648// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  // Signal SaveNextCookie's synchronous loop that this save has completed.
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}
668
669void URLRequestHttpJob::FetchResponseCookies(
670    std::vector<std::string>* cookies) {
671  const std::string name = "Set-Cookie";
672  std::string value;
673
674  void* iter = NULL;
675  HttpResponseHeaders* headers = GetResponseHeaders();
676  while (headers->EnumerateHeader(&iter, name, &value)) {
677    if (!value.empty())
678      cookies->push_back(value);
679  }
680}
681
682// NOTE: |ProcessStrictTransportSecurityHeader| and
683// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
684// They manipulate different parts of |TransportSecurityState::DomainState|,
685// and they must remain complementary. If, in future changes here, there is
686// any conflict between their policies (such as in |domain_state.mode|), you
687// should resolve the conflict in favor of the more strict policy.
// Parses Strict-Transport-Security response headers and records the result
// in the TransportSecurityState. Ignored entirely unless the connection is
// valid HTTPS with no certificate errors.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);

  const URLRequestContext* ctx = request_->context();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept strict transport security headers on HTTPS connections that
  // have no certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !ctx->transport_security_state()) {
    return;
  }

  TransportSecurityState* security_state = ctx->transport_security_state();
  TransportSecurityState::DomainState domain_state;
  const std::string& host = request_info_.url.host();

  bool sni_available =
      SSLConfigService::IsSNIAvailable(ctx->ssl_config_service());
  if (!security_state->GetDomainState(host, sni_available, &domain_state))
    // |GetDomainState| may have altered |domain_state| while searching. If
    // not found, start with a fresh state.
    domain_state.upgrade_mode =
        TransportSecurityState::DomainState::MODE_FORCE_HTTPS;

  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  void* iter = NULL;
  base::Time now = base::Time::Now();

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  bool seen_sts = false;
  while (headers->EnumerateHeader(&iter, "Strict-Transport-Security", &value)) {
    if (seen_sts)
      return;
    seen_sts = true;
    // NOTE(review): this inner |domain_state| shadows the one populated via
    // GetDomainState() above, which is otherwise unused in this function —
    // confirm that discarding the pre-existing state here is intended.
    TransportSecurityState::DomainState domain_state;
    if (domain_state.ParseSTSHeader(now, value))
      security_state->EnableHost(host, domain_state);
  }
}
733
// Parses Public-Key-Pins response headers and merges the dynamic pinning
// metadata into the TransportSecurityState. Like the STS handler above, it
// only applies to error-free HTTPS connections.
void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);

  const URLRequestContext* ctx = request_->context();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept public key pins headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !ctx->transport_security_state()) {
    return;
  }

  TransportSecurityState* security_state = ctx->transport_security_state();
  TransportSecurityState::DomainState domain_state;
  const std::string& host = request_info_.url.host();

  bool sni_available =
      SSLConfigService::IsSNIAvailable(ctx->ssl_config_service());
  if (!security_state->GetDomainState(host, sni_available, &domain_state))
    // |GetDomainState| may have altered |domain_state| while searching. If
    // not found, start with a fresh state.
    domain_state.upgrade_mode =
        TransportSecurityState::DomainState::MODE_DEFAULT;

  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  std::string value;
  base::Time now = base::Time::Now();

  while (headers->EnumerateHeader(&iter, "Public-Key-Pins", &value)) {
    // Note that ParsePinsHeader updates |domain_state| (iff the header parses
    // correctly), but does not completely overwrite it. It just updates the
    // dynamic pinning metadata.
    if (domain_state.ParsePinsHeader(now, value, ssl_info))
      security_state->EnableHost(host, domain_state);
  }
}
772
// Completion callback for HttpTransaction::Start / RestartWithAuth.
// Dispatches on |result|: success runs the delegate's header hook and then
// the cookie-save path; certificate errors, client-cert requests, and other
// failures are each routed to the appropriate notification.
void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  // A pin validation failure with response info available is reported to the
  // fraudulent-certificate reporter (if one is configured) before falling
  // through to the generic error handling below.
  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
      context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (context->network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      int error = context->network_delegate()->
          NotifyHeadersReceived(request_, on_headers_received_callback_,
                                headers, &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          // Delegate will answer later via OnHeadersReceivedCallback().
          awaiting_callback_ = true;
          request_->net_log().BeginEvent(
              NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE);
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error.  Ask our delegate to decide
    // what we should do.

    TransportSecurityState::DomainState domain_state;
    const URLRequestContext* context = request_->context();
    // An error is fatal (non-overridable) when the host has transport
    // security state (e.g. HSTS/pins) recorded for it.
    const bool fatal =
        context->transport_security_state() &&
        context->transport_security_state()->GetDomainState(
            request_info_.url.host(),
            SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
            &domain_state);
    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}
850
// Invoked when the network delegate's asynchronous NotifyHeadersReceived
// completes. |result| is the delegate's verdict (net::OK or an error code),
// which is forwarded to SaveCookiesAndNotifyHeadersComplete.
void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  request_->net_log().EndEvent(NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE);
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}
860
861void URLRequestHttpJob::OnReadCompleted(int result) {
862  read_in_progress_ = false;
863
864  if (ShouldFixMismatchedContentLength(result))
865    result = OK;
866
867  if (result == OK) {
868    NotifyDone(URLRequestStatus());
869  } else if (result < 0) {
870    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
871  } else {
872    // Clear the IO_PENDING status
873    SetStatus(URLRequestStatus());
874  }
875
876  NotifyReadComplete(result);
877}
878
// Restarts the transaction after credentials were supplied in response to a
// 401/407 auth challenge. Clears per-response state and re-enters the
// cookie-then-start sequence.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}
896
// Attaches the request body. Must be called before Start(); the transaction
// snapshots |request_info_| when it is created.
void URLRequestHttpJob::SetUpload(UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}
901
// Replaces the extra request headers wholesale. Must be called before
// Start(); later changes would not reach the transaction.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}
907
908void URLRequestHttpJob::Start() {
909  DCHECK(!transaction_.get());
910
911  // Ensure that we do not send username and password fields in the referrer.
912  GURL referrer(request_->GetSanitizedReferrer());
913
914  request_info_.url = request_->url();
915  request_info_.method = request_->method();
916  request_info_.load_flags = request_->load_flags();
917  request_info_.priority = request_->priority();
918  request_info_.request_id = request_->identifier();
919
920  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
921  // from overriding headers that are controlled using other means. Otherwise a
922  // plugin could set a referrer although sending the referrer is inhibited.
923  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);
924
925  // Our consumer should have made sure that this is a safe referrer.  See for
926  // instance WebCore::FrameLoader::HideReferrer.
927  if (referrer.is_valid()) {
928    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
929                                          referrer.spec());
930  }
931
932  request_info_.extra_headers.SetHeaderIfMissing(
933      HttpRequestHeaders::kUserAgent,
934      http_user_agent_settings_ ?
935          http_user_agent_settings_->GetUserAgent(request_->url()) :
936          EmptyString());
937
938  AddExtraHeaders();
939  AddCookieHeaderAndStart();
940}
941
// Cancels the job. Always tells the transaction delegate the request is
// detaching; if a transaction exists, pending weak callbacks are invalidated
// before the transaction is torn down so no stale completion fires.
void URLRequestHttpJob::Kill() {
  http_transaction_delegate_->OnDetachRequest();

  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}
952
953LoadState URLRequestHttpJob::GetLoadState() const {
954  return transaction_.get() ?
955      transaction_->GetLoadState() : LOAD_STATE_IDLE;
956}
957
958UploadProgress URLRequestHttpJob::GetUploadProgress() const {
959  return transaction_.get() ?
960      transaction_->GetUploadProgress() : UploadProgress();
961}
962
963bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
964  DCHECK(transaction_.get());
965
966  if (!response_info_)
967    return false;
968
969  return GetResponseHeaders()->GetMimeType(mime_type);
970}
971
972bool URLRequestHttpJob::GetCharset(std::string* charset) {
973  DCHECK(transaction_.get());
974
975  if (!response_info_)
976    return false;
977
978  return GetResponseHeaders()->GetCharset(charset);
979}
980
981void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
982  DCHECK(request_);
983  DCHECK(transaction_.get());
984
985  if (response_info_) {
986    *info = *response_info_;
987    if (override_response_headers_)
988      info->headers = override_response_headers_;
989  }
990}
991
// Fills |cookies| with the Set-Cookie values from the current response
// headers. Returns false when no response has been received yet.
bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again?  Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}
1005
1006int URLRequestHttpJob::GetResponseCode() const {
1007  DCHECK(transaction_.get());
1008
1009  if (!response_info_)
1010    return -1;
1011
1012  return GetResponseHeaders()->response_code();
1013}
1014
// Builds the decoding filter chain (gzip/deflate/SDCH/...) from the
// response's Content-Encoding headers. Returns NULL when no response exists
// yet or no filtering is needed; the caller takes ownership of the filter.
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  // Collect one filter type per Content-Encoding header value, in order.
  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding.  If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}
1054
1055bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
1056  // We only allow redirects to certain "safe" protocols.  This does not
1057  // restrict redirects to externally handled protocols.  Our consumer would
1058  // need to take care of those.
1059
1060  if (!URLRequest::IsHandledURL(location))
1061    return true;
1062
1063  static const char* kSafeSchemes[] = {
1064    "http",
1065    "https",
1066    "ftp"
1067  };
1068
1069  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
1070    if (location.SchemeIs(kSafeSchemes[i]))
1071      return true;
1072  }
1073
1074  return false;
1075}
1076
1077bool URLRequestHttpJob::NeedsAuth() {
1078  int code = GetResponseCode();
1079  if (code == -1)
1080    return false;
1081
1082  // Check if we need either Proxy or WWW Authentication.  This could happen
1083  // because we either provided no auth info, or provided incorrect info.
1084  switch (code) {
1085    case 407:
1086      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
1087        return false;
1088      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
1089      return true;
1090    case 401:
1091      if (server_auth_state_ == AUTH_STATE_CANCELED)
1092        return false;
1093      server_auth_state_ = AUTH_STATE_NEED_AUTH;
1094      return true;
1095  }
1096  return false;
1097}
1098
// Hands the pending auth challenge (from the 401/407 response) to the
// caller. Only valid after NeedsAuth() returned true.
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}
1113
// Supplies credentials for the pending auth challenge and restarts the
// transaction with them.
void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}
1127
// Declines the pending auth challenge; the consumer will then receive the
// 401/407 response body as an ordinary response.
void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}
1156
// Resumes the transaction after the consumer chose a client certificate
// (|client_cert| may be NULL to continue without one). Completion is always
// reported asynchronously through OnStartCompleted.
void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}
1180
// Resumes the transaction after the consumer chose to ignore the last error
// (e.g. an overridable SSL certificate error). Completion is always reported
// asynchronously through OnStartCompleted.
void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}
1205
1206bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
1207  // Some servers send the body compressed, but specify the content length as
1208  // the uncompressed size.  Although this violates the HTTP spec we want to
1209  // support it (as IE and FireFox do), but *only* for an exact match.
1210  // See http://crbug.com/79694.
1211  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
1212      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
1213    if (request_ && request_->response_headers()) {
1214      int64 expected_length = request_->response_headers()->GetContentLength();
1215      VLOG(1) << __FUNCTION__ << "() "
1216              << "\"" << request_->url().spec() << "\""
1217              << " content-length = " << expected_length
1218              << " pre total = " << prefilter_bytes_read()
1219              << " post total = " << postfilter_bytes_read();
1220      if (postfilter_bytes_read() == expected_length) {
1221        // Clear the error.
1222        return true;
1223      }
1224    }
1225  }
1226  return false;
1227}
1228
// Reads response body data into |buf|. Returns true with |*bytes_read| set
// when the read completed synchronously (0 means end-of-stream); returns
// false either with an IO_PENDING status (OnReadCompleted will fire later)
// or, on error, after signaling failure via NotifyDone.
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  // base::Unretained: |transaction_| is a member of this job, so the
  // callback is not expected to outlive |this|.
  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // Treat an exact content-length mismatch as a normal end-of-stream.
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    // rv == 0 means the body has been fully read.
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}
1258
1259void URLRequestHttpJob::StopCaching() {
1260  if (transaction_.get())
1261    transaction_->StopCaching();
1262}
1263
// Called when the consumer will read no more data. Forwards to the
// transaction (if any) and records the request as finished.
void URLRequestHttpJob::DoneReading() {
  if (transaction_.get())
    transaction_->DoneReading();
  DoneWithRequest(FINISHED);
}
1269
1270HostPortPair URLRequestHttpJob::GetSocketAddress() const {
1271  return response_info_ ? response_info_->socket_address : HostPortPair();
1272}
1273
// Destructor: flushes SDCH experiment stats, destroys filters while the
// filter context is still alive, optionally kicks off a deferred SDCH
// dictionary fetch, and records the request as aborted if not yet done.
URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  // A response can be in at most one SDCH experiment group.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}
1304
// Records time-to-first-byte UMA histograms (overall plus one per active
// field trial) and clears |request_creation_time_| so the same transaction
// cannot be counted twice.
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  // Null out the timestamp so ResetTimer() can detect double-recording.
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);

  // The TrialExists() lookups are cached in function-local statics because
  // trial membership does not change during the process lifetime.
  static const bool use_overlapped_read_histogram =
      base::FieldTrialList::TrialExists("OverlappedReadImpact");
  if (use_overlapped_read_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "OverlappedReadImpact"),
        to_start);
  }

  static const bool use_warm_socket_impact_histogram =
      base::FieldTrialList::TrialExists("WarmSocketImpact");
  if (use_warm_socket_impact_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "WarmSocketImpact"),
        to_start);
  }

  static const bool use_prefetch_histogram =
      base::FieldTrialList::TrialExists("Prefetch");
  if (use_prefetch_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prefetch"),
        to_start);
  }
  static const bool use_prerender_histogram =
      base::FieldTrialList::TrialExists("Prerender");
  if (use_prerender_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prerender"),
        to_start);
  }
}
1352
// Arms the time-to-first-byte timer. Must only be called after the previous
// measurement was consumed by RecordTimer().
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}
1361
// Tracks arrival times of raw (pre-filter) response bytes for the SDCH
// packet-timing statistics recorded in RecordPacketStats().
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  // On the first observed bytes, snapshot the request start time as the
  // baseline for duration measurements.
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}
1377
// Emits SDCH-related UMA histograms for the selected |statistic|, using the
// byte counts and timings accumulated by UpdatePacketReadTimes(). No-op when
// packet timing is disabled or no packets were observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}
1415
// The common type of histogram we use for all compression-tracking histograms.
// Counts range 500..1,000,000 bytes over 100 buckets; the name is prefixed
// with "Net.Compress.".
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)
1422
// Records how often (and how effectively) compressible responses were
// actually served compressed, bucketed by transport (SSL / proxy / direct).
// Skips cached, failed, non-compressible, and tiny responses.
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||                // Don't record cached content
      !GetStatus().is_success() ||         // Don't record failed content
      !IsCompressibleContent() ||          // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible.  Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  // A filter being present indicates the response arrived encoded.
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties.  So, for each request, we'll put it into one of 3
  // groups:
  //      a) SSL resources
  //         Proxies cannot tamper with compression headers with SSL.
  //      b) Non-SSL, loaded-via-proxy resources
  //         In this case, we know a proxy might have interfered.
  //      c) Non-SSL, loaded-without-proxy resources
  //         In this case, we know there was no explicit proxy.  However,
  //         it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}
1490
1491bool URLRequestHttpJob::IsCompressibleContent() const {
1492  std::string mime_type;
1493  return GetMimeType(&mime_type) &&
1494      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
1495       IsSupportedNonImageMimeType(mime_type.c_str()));
1496}
1497
// Records total-time UMA histograms for the job, split by outcome
// (finished vs. canceled) and cache status, plus per-field-trial variants.
// Clears |start_time_| so a job is only counted once.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else  {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  // Mirror the above histograms under the OverlappedReadImpact trial name
  // when that field trial is active.
  static const bool use_overlapped_read_histogram =
      base::FieldTrialList::TrialExists("OverlappedReadImpact");
  if (use_overlapped_read_histogram) {
    UMA_HISTOGRAM_TIMES(
        base::FieldTrial::MakeName("Net.HttpJob.TotalTime",
                                   "OverlappedReadImpact"),
        total_time);

    if (reason == FINISHED) {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
                                     "OverlappedReadImpact"),
          total_time);
    } else {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
                                     "OverlappedReadImpact"),
          total_time);
    }

    if (response_info_) {
      if (response_info_->was_cached) {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCached",
                                       "OverlappedReadImpact"),
            total_time);
      } else  {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeNotCached",
                                       "OverlappedReadImpact"),
            total_time);
      }
    }
  }

  // Prevent double-recording for this job.
  start_time_ = base::TimeTicks();
}
1556
// Marks the job as done exactly once and flushes the perf/compression
// histograms. |reason| distinguishes a normal finish from an abort.
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  // Guard against multiple completion paths (read EOF, DoneReading,
  // destructor) all reaching here.
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}
1567
1568HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
1569  DCHECK(transaction_.get());
1570  DCHECK(transaction_->GetResponseInfo());
1571  return override_response_headers_.get() ?
1572      override_response_headers_ :
1573      transaction_->GetResponseInfo()->headers;
1574}
1575
// Called when the owning URLRequest goes away; any pending delegate callback
// is no longer expected (see the CHECK in the destructor).
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}
1579
// Forwards request-detach notifications to the transaction delegate.
void URLRequestHttpJob::OnDetachRequest() {
  http_transaction_delegate_->OnDetachRequest();
}
1583
1584}  // namespace net
1585