// url_request_http_job.cc revision b2df76ea8fec9e32f6f3718986dba0d95315b29c
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/time.h"
#include "net/base/filter.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/cert/cert_status_flags.h"
#include "net/cookies/cookie_monster.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_delegate.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/ssl/ssl_cert_request_info.h"
#include "net/ssl/ssl_config_service.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"

// Request header used to advertise the locally available SDCH dictionaries
// to the server (see AddExtraHeaders below).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

// Exposes the state of its owning URLRequestHttpJob (mime type, URL, cache
// status, SDCH advertisement, byte counts) to the content-decoding Filter
// machinery, without handing filters the job itself.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  // Non-owning; the job owns this context (filter_context_ member).
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

// Relays the HttpTransaction's cache/network wait-state transitions to the
// NetworkDelegate. After OnDetachRequest() (also invoked from the destructor)
// every notification becomes a no-op, since |request_| is cleared.
class URLRequestHttpJob::HttpTransactionDelegateImpl
    : public HttpTransactionDelegate {
 public:
  HttpTransactionDelegateImpl(
      URLRequest* request, NetworkDelegate* network_delegate)
      : request_(request),
        network_delegate_(network_delegate),
        cache_active_(false),
        network_active_(false) {
  }
  virtual ~HttpTransactionDelegateImpl() {
    OnDetachRequest();
  }
  // Resets the wait state and severs the link to |request_|, so that later
  // transaction callbacks are ignored.
  void OnDetachRequest() {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_RESET);
    cache_active_ = false;
    network_active_ = false;
    request_ = NULL;
  }
  virtual void OnCacheActionStart() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(!cache_active_ && !network_active_);
    cache_active_ = true;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
  }
  virtual void OnCacheActionFinish() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(cache_active_ && !network_active_);
    cache_active_ = false;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
  }
  virtual void OnNetworkActionStart() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(!cache_active_ && !network_active_);
    network_active_ = true;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
  }
  virtual void OnNetworkActionFinish() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(!cache_active_ && network_active_);
    network_active_ = false;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
  }
 private:
  URLRequest* request_;            // Non-owning; NULL once detached.
  NetworkDelegate* network_delegate_;  // Non-owning.
  // Invariant (DCHECKed above): at most one of these is true at a time.
  bool cache_active_;
  bool network_active_;
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// TODO(darin): make sure the port blocking code is not lost
// static
// Creates the job for an http/https URLRequest. Returns an error job when the
// context has no transaction factory, or a 307 redirect job when HSTS demands
// an upgrade; otherwise a real URLRequestHttpJob.
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}


URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      start_callback_(base::Bind(
          &URLRequestHttpJob::OnStartCompleted, base::Unretained(this))),
      notify_before_headers_sent_callback_(base::Bind(
          &URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
          base::Unretained(this))),
      read_in_progress_(false),
      transaction_(NULL),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      weak_factory_(this),
      on_headers_received_callback_(base::Bind(
          &URLRequestHttpJob::OnHeadersReceivedCallback,
          base::Unretained(this))),
      awaiting_callback_(false),
      http_transaction_delegate_(new HttpTransactionDelegateImpl(
          request, network_delegate)),
      http_user_agent_settings_(http_user_agent_settings) {
  // Register with the back-off throttler, if the context has one.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}

// Records the new priority and forwards it to the live transaction, if any.
void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  if (transaction_)
    transaction_->SetPriority(priority_);
}

// Copies request state from the URLRequest into request_info_, applies
// referrer/user-agent policy, then proceeds via AddCookieHeaderAndStart().
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.request_id = request_->identifier();

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent(request_->url()) :
          EmptyString());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

// Cancels the job: detaches the transaction delegate, invalidates pending
// weak callbacks and tears down any in-flight transaction.
void URLRequestHttpJob::Kill() {
  http_transaction_delegate_->OnDetachRequest();

  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

// Called once response headers are available: records throttling feedback,
// processes HSTS/HPKP headers, scans for an SDCH dictionary advertisement,
// and either restarts for auth or notifies the base class.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

// Marks the request finished (for timing stats) before the base-class
// notification.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

// Releases the transaction and the (transaction-owned) response info.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
}

// Gives the NetworkDelegate a chance to modify/block the request headers
// before the transaction is actually started.
void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING) {
      SetBlockedOnDelegate();
      return;
    }
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

// Completion callback for the asynchronous NotifyBeforeSendHeaders path.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  SetUnblockedOnDelegate();

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

// Starts the transaction on OK; otherwise logs the delegate cancellation and
// reports the start error.
void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_, http_transaction_delegate_.get());
    if (rv == OK) {
      if (!throttling_entry_ ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Fills in Accept-Encoding (including SDCH advertisement and the SDCH latency
// experiment bucketing) and a default Accept-Language.
void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

// Asynchronously loads cookies for the request URL (unless suppressed by
// LOAD_DO_NOT_SEND_COOKIES), then continues with DoStartTransaction().
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      CheckCookiePolicyAndLoad(CookieList());
    }
  } else {
    DoStartTransaction();
  }
}

// Fetches the Cookie header string (httponly included) for the request URL.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

// Consults the delegate's cookie policy: load cookies if permitted, else
// start the transaction without them.
void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

// Installs the loaded cookie line (if any) as the Cookie header and proceeds.
void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

// On success, extracts Set-Cookie values and the response Date, then begins
// persisting the cookies; on error, logs and reports the start failure.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

// Collects every non-empty Set-Cookie header value from the response into
// |cookies|.
void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}

void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HPKP headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
  //
  //   If a UA receives more than one PKP header field in an HTTP
  //   response message over secure transport, then the UA MUST process
  //   only the first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
}

// Completion callback for HttpTransaction::Start/RestartWithAuth. Dispatches
// on |result|: reports pinning violations, runs the NotifyHeadersReceived
// delegate hook on success, and surfaces certificate errors/requests.
void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      int error = network_delegate()->NotifyHeadersReceived(
          request_, on_headers_received_callback_,
          headers, &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
          SetBlockedOnDelegate();
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.

    TransportSecurityState::DomainState domain_state;
    const URLRequestContext* context = request_->context();
    const bool fatal = context->transport_security_state() &&
        context->transport_security_state()->GetDomainState(
            request_info_.url.host(),
            SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
            &domain_state) &&
        domain_state.ShouldSSLErrorsBeFatal();
    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

// Completion callback for the asynchronous NotifyHeadersReceived path.
void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  SetUnblockedOnDelegate();
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

// Completion callback for transaction reads; translates |result| into job
// status before notifying the consumer.
void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

// Stores the supplied credentials and re-runs the cookie/start sequence so
// the restarted transaction picks up any cookies set by the 401/407 response.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_) {
    *info = *response_info_;
    // Headers possibly rewritten by the NetworkDelegate take precedence.
    if (override_response_headers_)
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  if (transaction_)
    transaction_->GetLoadTimingInfo(load_timing_info);
}

// NOTE(review): SOURCE is truncated here (mid-function); the remainder of
// GetResponseCookies is outside this view and is left as-is.
bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;
995 // TODO(darin): Why are we extracting response cookies again? Perhaps we 996 // should just leverage response_cookies_. 997 998 cookies->clear(); 999 FetchResponseCookies(cookies); 1000 return true; 1001} 1002 1003int URLRequestHttpJob::GetResponseCode() const { 1004 DCHECK(transaction_.get()); 1005 1006 if (!response_info_) 1007 return -1; 1008 1009 return GetResponseHeaders()->response_code(); 1010} 1011 1012Filter* URLRequestHttpJob::SetupFilter() const { 1013 DCHECK(transaction_.get()); 1014 if (!response_info_) 1015 return NULL; 1016 1017 std::vector<Filter::FilterType> encoding_types; 1018 std::string encoding_type; 1019 HttpResponseHeaders* headers = GetResponseHeaders(); 1020 void* iter = NULL; 1021 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) { 1022 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); 1023 } 1024 1025 if (filter_context_->IsSdchResponse()) { 1026 // We are wary of proxies that discard or damage SDCH encoding. If a server 1027 // explicitly states that this is not SDCH content, then we can correct our 1028 // assumption that this is an SDCH response, and avoid the need to recover 1029 // as though the content is corrupted (when we discover it is not SDCH 1030 // encoded). 1031 std::string sdch_response_status; 1032 iter = NULL; 1033 while (headers->EnumerateHeader(&iter, "X-Sdch-Encode", 1034 &sdch_response_status)) { 1035 if (sdch_response_status == "0") { 1036 filter_context_->ResetSdchResponseToFalse(); 1037 break; 1038 } 1039 } 1040 } 1041 1042 // Even if encoding types are empty, there is a chance that we need to add 1043 // some decoding, as some proxies strip encoding completely. In such cases, 1044 // we may need to add (for example) SDCH filtering (when the context suggests 1045 // it is appropriate). 1046 Filter::FixupEncodingTypes(*filter_context_, &encoding_types); 1047 1048 return !encoding_types.empty() 1049 ? 
Filter::Factory(encoding_types, *filter_context_) : NULL; 1050} 1051 1052bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { 1053 // HTTP is always safe. 1054 // TODO(pauljensen): Remove once crbug.com/146591 is fixed. 1055 if (location.is_valid() && 1056 (location.scheme() == "http" || location.scheme() == "https")) { 1057 return true; 1058 } 1059 // Query URLRequestJobFactory as to whether |location| would be safe to 1060 // redirect to. 1061 return request_->context()->job_factory() && 1062 request_->context()->job_factory()->IsSafeRedirectTarget(location); 1063} 1064 1065bool URLRequestHttpJob::NeedsAuth() { 1066 int code = GetResponseCode(); 1067 if (code == -1) 1068 return false; 1069 1070 // Check if we need either Proxy or WWW Authentication. This could happen 1071 // because we either provided no auth info, or provided incorrect info. 1072 switch (code) { 1073 case 407: 1074 if (proxy_auth_state_ == AUTH_STATE_CANCELED) 1075 return false; 1076 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; 1077 return true; 1078 case 401: 1079 if (server_auth_state_ == AUTH_STATE_CANCELED) 1080 return false; 1081 server_auth_state_ = AUTH_STATE_NEED_AUTH; 1082 return true; 1083 } 1084 return false; 1085} 1086 1087void URLRequestHttpJob::GetAuthChallengeInfo( 1088 scoped_refptr<AuthChallengeInfo>* result) { 1089 DCHECK(transaction_.get()); 1090 DCHECK(response_info_); 1091 1092 // sanity checks: 1093 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || 1094 server_auth_state_ == AUTH_STATE_NEED_AUTH); 1095 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) || 1096 (GetResponseHeaders()->response_code() == 1097 HTTP_PROXY_AUTHENTICATION_REQUIRED)); 1098 1099 *result = response_info_->auth_challenge; 1100} 1101 1102void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) { 1103 DCHECK(transaction_.get()); 1104 1105 // Proxy gets set first, then WWW. 
1106 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1107 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; 1108 } else { 1109 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1110 server_auth_state_ = AUTH_STATE_HAVE_AUTH; 1111 } 1112 1113 RestartTransactionWithAuth(credentials); 1114} 1115 1116void URLRequestHttpJob::CancelAuth() { 1117 // Proxy gets set first, then WWW. 1118 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1119 proxy_auth_state_ = AUTH_STATE_CANCELED; 1120 } else { 1121 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1122 server_auth_state_ = AUTH_STATE_CANCELED; 1123 } 1124 1125 // These will be reset in OnStartCompleted. 1126 response_info_ = NULL; 1127 response_cookies_.clear(); 1128 1129 ResetTimer(); 1130 1131 // OK, let the consumer read the error page... 1132 // 1133 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, 1134 // which will cause the consumer to receive OnResponseStarted instead of 1135 // OnAuthRequired. 1136 // 1137 // We have to do this via InvokeLater to avoid "recursing" the consumer. 1138 // 1139 MessageLoop::current()->PostTask( 1140 FROM_HERE, 1141 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1142 weak_factory_.GetWeakPtr(), OK)); 1143} 1144 1145void URLRequestHttpJob::ContinueWithCertificate( 1146 X509Certificate* client_cert) { 1147 DCHECK(transaction_.get()); 1148 1149 DCHECK(!response_info_) << "should not have a response yet"; 1150 1151 ResetTimer(); 1152 1153 // No matter what, we want to report our status as IO pending since we will 1154 // be notifying our consumer asynchronously via OnStartCompleted. 1155 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1156 1157 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_); 1158 if (rv == ERR_IO_PENDING) 1159 return; 1160 1161 // The transaction started synchronously, but we need to notify the 1162 // URLRequest delegate via the message loop. 
1163 MessageLoop::current()->PostTask( 1164 FROM_HERE, 1165 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1166 weak_factory_.GetWeakPtr(), rv)); 1167} 1168 1169void URLRequestHttpJob::ContinueDespiteLastError() { 1170 // If the transaction was destroyed, then the job was cancelled. 1171 if (!transaction_.get()) 1172 return; 1173 1174 DCHECK(!response_info_) << "should not have a response yet"; 1175 1176 ResetTimer(); 1177 1178 // No matter what, we want to report our status as IO pending since we will 1179 // be notifying our consumer asynchronously via OnStartCompleted. 1180 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1181 1182 int rv = transaction_->RestartIgnoringLastError(start_callback_); 1183 if (rv == ERR_IO_PENDING) 1184 return; 1185 1186 // The transaction started synchronously, but we need to notify the 1187 // URLRequest delegate via the message loop. 1188 MessageLoop::current()->PostTask( 1189 FROM_HERE, 1190 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1191 weak_factory_.GetWeakPtr(), rv)); 1192} 1193 1194bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { 1195 // Some servers send the body compressed, but specify the content length as 1196 // the uncompressed size. Although this violates the HTTP spec we want to 1197 // support it (as IE and FireFox do), but *only* for an exact match. 1198 // See http://crbug.com/79694. 1199 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH || 1200 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) { 1201 if (request_ && request_->response_headers()) { 1202 int64 expected_length = request_->response_headers()->GetContentLength(); 1203 VLOG(1) << __FUNCTION__ << "() " 1204 << "\"" << request_->url().spec() << "\"" 1205 << " content-length = " << expected_length 1206 << " pre total = " << prefilter_bytes_read() 1207 << " post total = " << postfilter_bytes_read(); 1208 if (postfilter_bytes_read() == expected_length) { 1209 // Clear the error. 
1210 return true; 1211 } 1212 } 1213 } 1214 return false; 1215} 1216 1217bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, 1218 int* bytes_read) { 1219 DCHECK_NE(buf_size, 0); 1220 DCHECK(bytes_read); 1221 DCHECK(!read_in_progress_); 1222 1223 int rv = transaction_->Read( 1224 buf, buf_size, 1225 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this))); 1226 1227 if (ShouldFixMismatchedContentLength(rv)) 1228 rv = 0; 1229 1230 if (rv >= 0) { 1231 *bytes_read = rv; 1232 if (!rv) 1233 DoneWithRequest(FINISHED); 1234 return true; 1235 } 1236 1237 if (rv == ERR_IO_PENDING) { 1238 read_in_progress_ = true; 1239 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1240 } else { 1241 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 1242 } 1243 1244 return false; 1245} 1246 1247void URLRequestHttpJob::StopCaching() { 1248 if (transaction_.get()) 1249 transaction_->StopCaching(); 1250} 1251 1252void URLRequestHttpJob::DoneReading() { 1253 if (transaction_.get()) 1254 transaction_->DoneReading(); 1255 DoneWithRequest(FINISHED); 1256} 1257 1258HostPortPair URLRequestHttpJob::GetSocketAddress() const { 1259 return response_info_ ? 
response_info_->socket_address : HostPortPair(); 1260} 1261 1262void URLRequestHttpJob::RecordTimer() { 1263 if (request_creation_time_.is_null()) { 1264 NOTREACHED() 1265 << "The same transaction shouldn't start twice without new timing."; 1266 return; 1267 } 1268 1269 base::TimeDelta to_start = base::Time::Now() - request_creation_time_; 1270 request_creation_time_ = base::Time(); 1271 1272 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); 1273} 1274 1275void URLRequestHttpJob::ResetTimer() { 1276 if (!request_creation_time_.is_null()) { 1277 NOTREACHED() 1278 << "The timer was reset before it was recorded."; 1279 return; 1280 } 1281 request_creation_time_ = base::Time::Now(); 1282} 1283 1284void URLRequestHttpJob::UpdatePacketReadTimes() { 1285 if (!packet_timing_enabled_) 1286 return; 1287 1288 if (filter_input_byte_count() <= bytes_observed_in_packets_) { 1289 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); 1290 return; // No new bytes have arrived. 1291 } 1292 1293 final_packet_time_ = base::Time::Now(); 1294 if (!bytes_observed_in_packets_) 1295 request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); 1296 1297 bytes_observed_in_packets_ = filter_input_byte_count(); 1298} 1299 1300void URLRequestHttpJob::RecordPacketStats( 1301 FilterContext::StatisticSelector statistic) const { 1302 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) 1303 return; 1304 1305 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; 1306 switch (statistic) { 1307 case FilterContext::SDCH_DECODE: { 1308 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", 1309 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); 1310 return; 1311 } 1312 case FilterContext::SDCH_PASSTHROUGH: { 1313 // Despite advertising a dictionary, we handled non-sdch compressed 1314 // content. 
1315 return; 1316 } 1317 1318 case FilterContext::SDCH_EXPERIMENT_DECODE: { 1319 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode", 1320 duration, 1321 base::TimeDelta::FromMilliseconds(20), 1322 base::TimeDelta::FromMinutes(10), 100); 1323 return; 1324 } 1325 case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { 1326 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback", 1327 duration, 1328 base::TimeDelta::FromMilliseconds(20), 1329 base::TimeDelta::FromMinutes(10), 100); 1330 return; 1331 } 1332 default: 1333 NOTREACHED(); 1334 return; 1335 } 1336} 1337 1338// The common type of histogram we use for all compression-tracking histograms. 1339#define COMPRESSION_HISTOGRAM(name, sample) \ 1340 do { \ 1341 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \ 1342 500, 1000000, 100); \ 1343 } while (0) 1344 1345void URLRequestHttpJob::RecordCompressionHistograms() { 1346 DCHECK(request_); 1347 if (!request_) 1348 return; 1349 1350 if (is_cached_content_ || // Don't record cached content 1351 !GetStatus().is_success() || // Don't record failed content 1352 !IsCompressibleContent() || // Only record compressible content 1353 !prefilter_bytes_read()) // Zero-byte responses aren't useful. 1354 return; 1355 1356 // Miniature requests aren't really compressible. Don't count them. 1357 const int kMinSize = 16; 1358 if (prefilter_bytes_read() < kMinSize) 1359 return; 1360 1361 // Only record for http or https urls. 1362 bool is_http = request_->url().SchemeIs("http"); 1363 bool is_https = request_->url().SchemeIs("https"); 1364 if (!is_http && !is_https) 1365 return; 1366 1367 int compressed_B = prefilter_bytes_read(); 1368 int decompressed_B = postfilter_bytes_read(); 1369 bool was_filtered = HasFilter(); 1370 1371 // We want to record how often downloaded resources are compressed. 1372 // But, we recognize that different protocols may have different 1373 // properties. 
So, for each request, we'll put it into one of 3 1374 // groups: 1375 // a) SSL resources 1376 // Proxies cannot tamper with compression headers with SSL. 1377 // b) Non-SSL, loaded-via-proxy resources 1378 // In this case, we know a proxy might have interfered. 1379 // c) Non-SSL, loaded-without-proxy resources 1380 // In this case, we know there was no explicit proxy. However, 1381 // it is possible that a transparent proxy was still interfering. 1382 // 1383 // For each group, we record the same 3 histograms. 1384 1385 if (is_https) { 1386 if (was_filtered) { 1387 COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); 1388 COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); 1389 } else { 1390 COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); 1391 } 1392 return; 1393 } 1394 1395 if (request_->was_fetched_via_proxy()) { 1396 if (was_filtered) { 1397 COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); 1398 COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); 1399 } else { 1400 COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); 1401 } 1402 return; 1403 } 1404 1405 if (was_filtered) { 1406 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); 1407 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); 1408 } else { 1409 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); 1410 } 1411} 1412 1413bool URLRequestHttpJob::IsCompressibleContent() const { 1414 std::string mime_type; 1415 return GetMimeType(&mime_type) && 1416 (IsSupportedJavascriptMimeType(mime_type.c_str()) || 1417 IsSupportedNonImageMimeType(mime_type.c_str())); 1418} 1419 1420void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) { 1421 if (start_time_.is_null()) 1422 return; 1423 1424 base::TimeDelta total_time = base::TimeTicks::Now() - start_time_; 1425 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time); 1426 1427 if 
(reason == FINISHED) { 1428 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time); 1429 } else { 1430 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time); 1431 } 1432 1433 if (response_info_) { 1434 if (response_info_->was_cached) { 1435 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time); 1436 } else { 1437 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time); 1438 } 1439 } 1440 1441 start_time_ = base::TimeTicks(); 1442} 1443 1444void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { 1445 if (done_) 1446 return; 1447 done_ = true; 1448 RecordPerfHistograms(reason); 1449 if (reason == FINISHED) { 1450 request_->set_received_response_content_length(prefilter_bytes_read()); 1451 RecordCompressionHistograms(); 1452 } 1453} 1454 1455HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const { 1456 DCHECK(transaction_.get()); 1457 DCHECK(transaction_->GetResponseInfo()); 1458 return override_response_headers_.get() ? 1459 override_response_headers_ : 1460 transaction_->GetResponseInfo()->headers; 1461} 1462 1463void URLRequestHttpJob::NotifyURLRequestDestroyed() { 1464 awaiting_callback_ = false; 1465} 1466 1467void URLRequestHttpJob::OnDetachRequest() { 1468 http_transaction_delegate_->OnDetachRequest(); 1469} 1470 1471} // namespace net 1472