// url_request_http_job.cc revision 868fa2fe829687343ffae624259930155e16dbd8
1// Copyright (c) 2012 The Chromium Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include "net/url_request/url_request_http_job.h" 6 7#include "base/base_switches.h" 8#include "base/bind.h" 9#include "base/bind_helpers.h" 10#include "base/command_line.h" 11#include "base/compiler_specific.h" 12#include "base/file_version_info.h" 13#include "base/message_loop.h" 14#include "base/metrics/field_trial.h" 15#include "base/metrics/histogram.h" 16#include "base/rand_util.h" 17#include "base/strings/string_util.h" 18#include "base/time.h" 19#include "net/base/filter.h" 20#include "net/base/host_port_pair.h" 21#include "net/base/load_flags.h" 22#include "net/base/mime_util.h" 23#include "net/base/net_errors.h" 24#include "net/base/net_util.h" 25#include "net/base/network_delegate.h" 26#include "net/base/sdch_manager.h" 27#include "net/cert/cert_status_flags.h" 28#include "net/cookies/cookie_monster.h" 29#include "net/http/http_network_session.h" 30#include "net/http/http_request_headers.h" 31#include "net/http/http_response_headers.h" 32#include "net/http/http_response_info.h" 33#include "net/http/http_status_code.h" 34#include "net/http/http_transaction.h" 35#include "net/http/http_transaction_delegate.h" 36#include "net/http/http_transaction_factory.h" 37#include "net/http/http_util.h" 38#include "net/ssl/ssl_cert_request_info.h" 39#include "net/ssl/ssl_config_service.h" 40#include "net/url_request/fraudulent_certificate_reporter.h" 41#include "net/url_request/http_user_agent_settings.h" 42#include "net/url_request/url_request.h" 43#include "net/url_request/url_request_context.h" 44#include "net/url_request/url_request_error_job.h" 45#include "net/url_request/url_request_job_factory.h" 46#include "net/url_request/url_request_redirect_job.h" 47#include "net/url_request/url_request_throttler_header_adapter.h" 48#include "net/url_request/url_request_throttler_manager.h" 49 50static 
const char kAvailDictionaryHeader[] = "Avail-Dictionary"; 51 52namespace net { 53 54class URLRequestHttpJob::HttpFilterContext : public FilterContext { 55 public: 56 explicit HttpFilterContext(URLRequestHttpJob* job); 57 virtual ~HttpFilterContext(); 58 59 // FilterContext implementation. 60 virtual bool GetMimeType(std::string* mime_type) const OVERRIDE; 61 virtual bool GetURL(GURL* gurl) const OVERRIDE; 62 virtual base::Time GetRequestTime() const OVERRIDE; 63 virtual bool IsCachedContent() const OVERRIDE; 64 virtual bool IsDownload() const OVERRIDE; 65 virtual bool IsSdchResponse() const OVERRIDE; 66 virtual int64 GetByteReadCount() const OVERRIDE; 67 virtual int GetResponseCode() const OVERRIDE; 68 virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE; 69 70 // Method to allow us to reset filter context for a response that should have 71 // been SDCH encoded when there is an update due to an explicit HTTP header. 72 void ResetSdchResponseToFalse(); 73 74 private: 75 URLRequestHttpJob* job_; 76 77 DISALLOW_COPY_AND_ASSIGN(HttpFilterContext); 78}; 79 80class URLRequestHttpJob::HttpTransactionDelegateImpl 81 : public HttpTransactionDelegate { 82 public: 83 HttpTransactionDelegateImpl( 84 URLRequest* request, NetworkDelegate* network_delegate) 85 : request_(request), 86 network_delegate_(network_delegate), 87 cache_active_(false), 88 network_active_(false) { 89 } 90 virtual ~HttpTransactionDelegateImpl() { 91 OnDetachRequest(); 92 } 93 void OnDetachRequest() { 94 if (request_ == NULL || network_delegate_ == NULL) 95 return; 96 network_delegate_->NotifyRequestWaitStateChange( 97 *request_, 98 NetworkDelegate::REQUEST_WAIT_STATE_RESET); 99 cache_active_ = false; 100 network_active_ = false; 101 request_ = NULL; 102 } 103 virtual void OnCacheActionStart() OVERRIDE { 104 if (request_ == NULL || network_delegate_ == NULL) 105 return; 106 DCHECK(!cache_active_ && !network_active_); 107 cache_active_ = true; 108 
network_delegate_->NotifyRequestWaitStateChange( 109 *request_, 110 NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START); 111 } 112 virtual void OnCacheActionFinish() OVERRIDE { 113 if (request_ == NULL || network_delegate_ == NULL) 114 return; 115 DCHECK(cache_active_ && !network_active_); 116 cache_active_ = false; 117 network_delegate_->NotifyRequestWaitStateChange( 118 *request_, 119 NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH); 120 } 121 virtual void OnNetworkActionStart() OVERRIDE { 122 if (request_ == NULL || network_delegate_ == NULL) 123 return; 124 DCHECK(!cache_active_ && !network_active_); 125 network_active_ = true; 126 network_delegate_->NotifyRequestWaitStateChange( 127 *request_, 128 NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START); 129 } 130 virtual void OnNetworkActionFinish() OVERRIDE { 131 if (request_ == NULL || network_delegate_ == NULL) 132 return; 133 DCHECK(!cache_active_ && network_active_); 134 network_active_ = false; 135 network_delegate_->NotifyRequestWaitStateChange( 136 *request_, 137 NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH); 138 } 139 private: 140 URLRequest* request_; 141 NetworkDelegate* network_delegate_; 142 bool cache_active_; 143 bool network_active_; 144}; 145 146URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job) 147 : job_(job) { 148 DCHECK(job_); 149} 150 151URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() { 152} 153 154bool URLRequestHttpJob::HttpFilterContext::GetMimeType( 155 std::string* mime_type) const { 156 return job_->GetMimeType(mime_type); 157} 158 159bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const { 160 if (!job_->request()) 161 return false; 162 *gurl = job_->request()->url(); 163 return true; 164} 165 166base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const { 167 return job_->request() ? 
job_->request()->request_time() : base::Time(); 168} 169 170bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const { 171 return job_->is_cached_content_; 172} 173 174bool URLRequestHttpJob::HttpFilterContext::IsDownload() const { 175 return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0; 176} 177 178void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() { 179 DCHECK(job_->sdch_dictionary_advertised_); 180 job_->sdch_dictionary_advertised_ = false; 181} 182 183bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const { 184 return job_->sdch_dictionary_advertised_; 185} 186 187int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const { 188 return job_->filter_input_byte_count(); 189} 190 191int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const { 192 return job_->GetResponseCode(); 193} 194 195void URLRequestHttpJob::HttpFilterContext::RecordPacketStats( 196 StatisticSelector statistic) const { 197 job_->RecordPacketStats(statistic); 198} 199 200// TODO(darin): make sure the port blocking code is not lost 201// static 202URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request, 203 NetworkDelegate* network_delegate, 204 const std::string& scheme) { 205 DCHECK(scheme == "http" || scheme == "https"); 206 207 if (!request->context()->http_transaction_factory()) { 208 NOTREACHED() << "requires a valid context"; 209 return new URLRequestErrorJob( 210 request, network_delegate, ERR_INVALID_ARGUMENT); 211 } 212 213 GURL redirect_url; 214 if (request->GetHSTSRedirect(&redirect_url)) { 215 return new URLRequestRedirectJob( 216 request, network_delegate, redirect_url, 217 // Use status code 307 to preserve the method, so POST requests work. 
218 URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT); 219 } 220 return new URLRequestHttpJob(request, 221 network_delegate, 222 request->context()->http_user_agent_settings()); 223} 224 225 226URLRequestHttpJob::URLRequestHttpJob( 227 URLRequest* request, 228 NetworkDelegate* network_delegate, 229 const HttpUserAgentSettings* http_user_agent_settings) 230 : URLRequestJob(request, network_delegate), 231 priority_(DEFAULT_PRIORITY), 232 response_info_(NULL), 233 response_cookies_save_index_(0), 234 proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH), 235 server_auth_state_(AUTH_STATE_DONT_NEED_AUTH), 236 start_callback_(base::Bind( 237 &URLRequestHttpJob::OnStartCompleted, base::Unretained(this))), 238 notify_before_headers_sent_callback_(base::Bind( 239 &URLRequestHttpJob::NotifyBeforeSendHeadersCallback, 240 base::Unretained(this))), 241 read_in_progress_(false), 242 transaction_(NULL), 243 throttling_entry_(NULL), 244 sdch_dictionary_advertised_(false), 245 sdch_test_activated_(false), 246 sdch_test_control_(false), 247 is_cached_content_(false), 248 request_creation_time_(), 249 packet_timing_enabled_(false), 250 done_(false), 251 bytes_observed_in_packets_(0), 252 request_time_snapshot_(), 253 final_packet_time_(), 254 filter_context_(new HttpFilterContext(this)), 255 weak_factory_(this), 256 on_headers_received_callback_(base::Bind( 257 &URLRequestHttpJob::OnHeadersReceivedCallback, 258 base::Unretained(this))), 259 awaiting_callback_(false), 260 http_transaction_delegate_(new HttpTransactionDelegateImpl( 261 request, network_delegate)), 262 http_user_agent_settings_(http_user_agent_settings) { 263 URLRequestThrottlerManager* manager = request->context()->throttler_manager(); 264 if (manager) 265 throttling_entry_ = manager->RegisterRequestUrl(request->url()); 266 267 ResetTimer(); 268} 269 270URLRequestHttpJob::~URLRequestHttpJob() { 271 CHECK(!awaiting_callback_); 272 273 DCHECK(!sdch_test_control_ || !sdch_test_activated_); 274 if (!is_cached_content_) { 275 
if (sdch_test_control_) 276 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK); 277 if (sdch_test_activated_) 278 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE); 279 } 280 // Make sure SDCH filters are told to emit histogram data while 281 // filter_context_ is still alive. 282 DestroyFilters(); 283 284 if (sdch_dictionary_url_.is_valid()) { 285 // Prior to reaching the destructor, request_ has been set to a NULL 286 // pointer, so request_->url() is no longer valid in the destructor, and we 287 // use an alternate copy |request_info_.url|. 288 SdchManager* manager = SdchManager::Global(); 289 // To be extra safe, since this is a "different time" from when we decided 290 // to get the dictionary, we'll validate that an SdchManager is available. 291 // At shutdown time, care is taken to be sure that we don't delete this 292 // globally useful instance "too soon," so this check is just defensive 293 // coding to assure that IF the system is shutting down, we don't have any 294 // problem if the manager was deleted ahead of time. 295 if (manager) // Defensive programming. 296 manager->FetchDictionary(request_info_.url, sdch_dictionary_url_); 297 } 298 DoneWithRequest(ABORTED); 299} 300 301void URLRequestHttpJob::SetPriority(RequestPriority priority) { 302 priority_ = priority; 303 if (transaction_) 304 transaction_->SetPriority(priority_); 305} 306 307void URLRequestHttpJob::Start() { 308 DCHECK(!transaction_.get()); 309 310 // URLRequest::SetReferrer ensures that we do not send username and password 311 // fields in the referrer. 312 GURL referrer(request_->referrer()); 313 314 request_info_.url = request_->url(); 315 request_info_.method = request_->method(); 316 request_info_.load_flags = request_->load_flags(); 317 request_info_.request_id = request_->identifier(); 318 // Enable privacy mode if cookie settings or flags tell us not send or 319 // save cookies. 
320 bool enable_privacy_mode = 321 (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) || 322 (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) || 323 CanEnablePrivacyMode(); 324 // Privacy mode could still be disabled in OnCookiesLoaded if we are going 325 // to send previously saved cookies. 326 request_info_.privacy_mode = enable_privacy_mode ? 327 kPrivacyModeEnabled : kPrivacyModeDisabled; 328 329 // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins 330 // from overriding headers that are controlled using other means. Otherwise a 331 // plugin could set a referrer although sending the referrer is inhibited. 332 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer); 333 334 // Our consumer should have made sure that this is a safe referrer. See for 335 // instance WebCore::FrameLoader::HideReferrer. 336 if (referrer.is_valid()) { 337 request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer, 338 referrer.spec()); 339 } 340 341 request_info_.extra_headers.SetHeaderIfMissing( 342 HttpRequestHeaders::kUserAgent, 343 http_user_agent_settings_ ? 344 http_user_agent_settings_->GetUserAgent(request_->url()) : 345 EmptyString()); 346 347 AddExtraHeaders(); 348 AddCookieHeaderAndStart(); 349} 350 351void URLRequestHttpJob::Kill() { 352 http_transaction_delegate_->OnDetachRequest(); 353 354 if (!transaction_.get()) 355 return; 356 357 weak_factory_.InvalidateWeakPtrs(); 358 DestroyTransaction(); 359 URLRequestJob::Kill(); 360} 361 362void URLRequestHttpJob::NotifyHeadersComplete() { 363 DCHECK(!response_info_); 364 365 response_info_ = transaction_->GetResponseInfo(); 366 367 // Save boolean, as we'll need this info at destruction time, and filters may 368 // also need this info. 
369 is_cached_content_ = response_info_->was_cached; 370 371 if (!is_cached_content_ && throttling_entry_.get()) { 372 URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders()); 373 throttling_entry_->UpdateWithResponse(request_info_.url.host(), 374 &response_adapter); 375 } 376 377 // The ordering of these calls is not important. 378 ProcessStrictTransportSecurityHeader(); 379 ProcessPublicKeyPinsHeader(); 380 381 if (SdchManager::Global() && 382 SdchManager::Global()->IsInSupportedDomain(request_->url())) { 383 const std::string name = "Get-Dictionary"; 384 std::string url_text; 385 void* iter = NULL; 386 // TODO(jar): We need to not fetch dictionaries the first time they are 387 // seen, but rather wait until we can justify their usefulness. 388 // For now, we will only fetch the first dictionary, which will at least 389 // require multiple suggestions before we get additional ones for this site. 390 // Eventually we should wait until a dictionary is requested several times 391 // before we even download it (so that we don't waste memory or bandwidth). 392 if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) { 393 // request_->url() won't be valid in the destructor, so we use an 394 // alternate copy. 395 DCHECK_EQ(request_->url(), request_info_.url); 396 // Resolve suggested URL relative to request url. 397 sdch_dictionary_url_ = request_info_.url.Resolve(url_text); 398 } 399 } 400 401 // The HTTP transaction may be restarted several times for the purposes 402 // of sending authorization information. Each time it restarts, we get 403 // notified of the headers completion so that we can update the cookie store. 404 if (transaction_->IsReadyToRestartForAuth()) { 405 DCHECK(!response_info_->auth_challenge.get()); 406 // TODO(battre): This breaks the webrequest API for 407 // URLRequestTestHTTP.BasicAuthWithCookies 408 // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders 409 // occurs. 
410 RestartTransactionWithAuth(AuthCredentials()); 411 return; 412 } 413 414 URLRequestJob::NotifyHeadersComplete(); 415} 416 417void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) { 418 DoneWithRequest(FINISHED); 419 URLRequestJob::NotifyDone(status); 420} 421 422void URLRequestHttpJob::DestroyTransaction() { 423 DCHECK(transaction_.get()); 424 425 DoneWithRequest(ABORTED); 426 transaction_.reset(); 427 response_info_ = NULL; 428 receive_headers_end_ = base::TimeTicks(); 429} 430 431void URLRequestHttpJob::StartTransaction() { 432 if (network_delegate()) { 433 int rv = network_delegate()->NotifyBeforeSendHeaders( 434 request_, notify_before_headers_sent_callback_, 435 &request_info_.extra_headers); 436 // If an extension blocks the request, we rely on the callback to 437 // MaybeStartTransactionInternal(). 438 if (rv == ERR_IO_PENDING) { 439 SetBlockedOnDelegate(); 440 return; 441 } 442 MaybeStartTransactionInternal(rv); 443 return; 444 } 445 StartTransactionInternal(); 446} 447 448void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) { 449 SetUnblockedOnDelegate(); 450 451 // Check that there are no callbacks to already canceled requests. 452 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); 453 454 MaybeStartTransactionInternal(result); 455} 456 457void URLRequestHttpJob::MaybeStartTransactionInternal(int result) { 458 if (result == OK) { 459 StartTransactionInternal(); 460 } else { 461 std::string source("delegate"); 462 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 463 NetLog::StringCallback("source", &source)); 464 NotifyCanceled(); 465 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 466 } 467} 468 469void URLRequestHttpJob::StartTransactionInternal() { 470 // NOTE: This method assumes that request_info_ is already setup properly. 471 472 // If we already have a transaction, then we should restart the transaction 473 // with auth provided by auth_credentials_. 
474 475 int rv; 476 477 if (network_delegate()) { 478 network_delegate()->NotifySendHeaders( 479 request_, request_info_.extra_headers); 480 } 481 482 if (transaction_.get()) { 483 rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_); 484 auth_credentials_ = AuthCredentials(); 485 } else { 486 DCHECK(request_->context()->http_transaction_factory()); 487 488 rv = request_->context()->http_transaction_factory()->CreateTransaction( 489 priority_, &transaction_, http_transaction_delegate_.get()); 490 if (rv == OK) { 491 if (!throttling_entry_.get() || 492 !throttling_entry_->ShouldRejectRequest(*request_)) { 493 rv = transaction_->Start( 494 &request_info_, start_callback_, request_->net_log()); 495 start_time_ = base::TimeTicks::Now(); 496 } else { 497 // Special error code for the exponential back-off module. 498 rv = ERR_TEMPORARILY_THROTTLED; 499 } 500 } 501 } 502 503 if (rv == ERR_IO_PENDING) 504 return; 505 506 // The transaction started synchronously, but we need to notify the 507 // URLRequest delegate via the message loop. 508 base::MessageLoop::current()->PostTask( 509 FROM_HERE, 510 base::Bind(&URLRequestHttpJob::OnStartCompleted, 511 weak_factory_.GetWeakPtr(), rv)); 512} 513 514void URLRequestHttpJob::AddExtraHeaders() { 515 // Supply Accept-Encoding field only if it is not already provided. 516 // It should be provided IF the content is known to have restrictions on 517 // potential encoding, such as streaming multi-media. 518 // For details see bug 47381. 519 // TODO(jar, enal): jpeg files etc. should set up a request header if 520 // possible. Right now it is done only by buffered_resource_loader and 521 // simple_data_source. 
522 if (!request_info_.extra_headers.HasHeader( 523 HttpRequestHeaders::kAcceptEncoding)) { 524 bool advertise_sdch = SdchManager::Global() && 525 SdchManager::Global()->IsInSupportedDomain(request_->url()); 526 std::string avail_dictionaries; 527 if (advertise_sdch) { 528 SdchManager::Global()->GetAvailDictionaryList(request_->url(), 529 &avail_dictionaries); 530 531 // The AllowLatencyExperiment() is only true if we've successfully done a 532 // full SDCH compression recently in this browser session for this host. 533 // Note that for this path, there might be no applicable dictionaries, 534 // and hence we can't participate in the experiment. 535 if (!avail_dictionaries.empty() && 536 SdchManager::Global()->AllowLatencyExperiment(request_->url())) { 537 // We are participating in the test (or control), and hence we'll 538 // eventually record statistics via either SDCH_EXPERIMENT_DECODE or 539 // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. 540 packet_timing_enabled_ = true; 541 if (base::RandDouble() < .01) { 542 sdch_test_control_ = true; // 1% probability. 543 advertise_sdch = false; 544 } else { 545 sdch_test_activated_ = true; 546 } 547 } 548 } 549 550 // Supply Accept-Encoding headers first so that it is more likely that they 551 // will be in the first transmitted packet. This can sometimes make it 552 // easier to filter and analyze the streams to assure that a proxy has not 553 // damaged these headers. Some proxies deliberately corrupt Accept-Encoding 554 // headers. 555 if (!advertise_sdch) { 556 // Tell the server what compression formats we support (other than SDCH). 557 request_info_.extra_headers.SetHeader( 558 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate"); 559 } else { 560 // Include SDCH in acceptable list. 
561 request_info_.extra_headers.SetHeader( 562 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch"); 563 if (!avail_dictionaries.empty()) { 564 request_info_.extra_headers.SetHeader( 565 kAvailDictionaryHeader, 566 avail_dictionaries); 567 sdch_dictionary_advertised_ = true; 568 // Since we're tagging this transaction as advertising a dictionary, 569 // we'll definitely employ an SDCH filter (or tentative sdch filter) 570 // when we get a response. When done, we'll record histograms via 571 // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet 572 // arrival times. 573 packet_timing_enabled_ = true; 574 } 575 } 576 } 577 578 if (http_user_agent_settings_) { 579 // Only add default Accept-Language if the request didn't have it 580 // specified. 581 std::string accept_language = 582 http_user_agent_settings_->GetAcceptLanguage(); 583 if (!accept_language.empty()) { 584 request_info_.extra_headers.SetHeaderIfMissing( 585 HttpRequestHeaders::kAcceptLanguage, 586 accept_language); 587 } 588 } 589} 590 591void URLRequestHttpJob::AddCookieHeaderAndStart() { 592 // No matter what, we want to report our status as IO pending since we will 593 // be notifying our consumer asynchronously via OnStartCompleted. 594 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 595 596 // If the request was destroyed, then there is no more work to do. 
597 if (!request_) 598 return; 599 600 CookieStore* cookie_store = request_->context()->cookie_store(); 601 if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) { 602 net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster(); 603 if (cookie_monster) { 604 cookie_monster->GetAllCookiesForURLAsync( 605 request_->url(), 606 base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad, 607 weak_factory_.GetWeakPtr())); 608 } else { 609 CheckCookiePolicyAndLoad(CookieList()); 610 } 611 } else { 612 DoStartTransaction(); 613 } 614} 615 616void URLRequestHttpJob::DoLoadCookies() { 617 CookieOptions options; 618 options.set_include_httponly(); 619 request_->context()->cookie_store()->GetCookiesWithOptionsAsync( 620 request_->url(), options, 621 base::Bind(&URLRequestHttpJob::OnCookiesLoaded, 622 weak_factory_.GetWeakPtr())); 623} 624 625void URLRequestHttpJob::CheckCookiePolicyAndLoad( 626 const CookieList& cookie_list) { 627 if (CanGetCookies(cookie_list)) 628 DoLoadCookies(); 629 else 630 DoStartTransaction(); 631} 632 633void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) { 634 if (!cookie_line.empty()) { 635 request_info_.extra_headers.SetHeader( 636 HttpRequestHeaders::kCookie, cookie_line); 637 // Disable privacy mode as we are sending cookies anyway. 638 request_info_.privacy_mode = kPrivacyModeDisabled; 639 } 640 DoStartTransaction(); 641} 642 643void URLRequestHttpJob::DoStartTransaction() { 644 // We may have been canceled while retrieving cookies. 
645 if (GetStatus().is_success()) { 646 StartTransaction(); 647 } else { 648 NotifyCanceled(); 649 } 650} 651 652void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) { 653 if (result != net::OK) { 654 std::string source("delegate"); 655 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 656 NetLog::StringCallback("source", &source)); 657 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 658 return; 659 } 660 661 DCHECK(transaction_.get()); 662 663 const HttpResponseInfo* response_info = transaction_->GetResponseInfo(); 664 DCHECK(response_info); 665 666 response_cookies_.clear(); 667 response_cookies_save_index_ = 0; 668 669 FetchResponseCookies(&response_cookies_); 670 671 if (!GetResponseHeaders()->GetDateValue(&response_date_)) 672 response_date_ = base::Time(); 673 674 // Now, loop over the response cookies, and attempt to persist each. 675 SaveNextCookie(); 676} 677 678// If the save occurs synchronously, SaveNextCookie will loop and save the next 679// cookie. If the save is deferred, the callback is responsible for continuing 680// to iterate through the cookies. 681// TODO(erikwright): Modify the CookieStore API to indicate via return value 682// whether it completed synchronously or asynchronously. 683// See http://crbug.com/131066. 684void URLRequestHttpJob::SaveNextCookie() { 685 // No matter what, we want to report our status as IO pending since we will 686 // be notifying our consumer asynchronously via OnStartCompleted. 687 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 688 689 // Used to communicate with the callback. See the implementation of 690 // OnCookieSaved. 
691 scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false); 692 scoped_refptr<SharedBoolean> save_next_cookie_running = 693 new SharedBoolean(true); 694 695 if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) && 696 request_->context()->cookie_store() && 697 response_cookies_.size() > 0) { 698 CookieOptions options; 699 options.set_include_httponly(); 700 options.set_server_time(response_date_); 701 702 net::CookieStore::SetCookiesCallback callback( 703 base::Bind(&URLRequestHttpJob::OnCookieSaved, 704 weak_factory_.GetWeakPtr(), 705 save_next_cookie_running, 706 callback_pending)); 707 708 // Loop through the cookies as long as SetCookieWithOptionsAsync completes 709 // synchronously. 710 while (!callback_pending->data && 711 response_cookies_save_index_ < response_cookies_.size()) { 712 if (CanSetCookie( 713 response_cookies_[response_cookies_save_index_], &options)) { 714 callback_pending->data = true; 715 request_->context()->cookie_store()->SetCookieWithOptionsAsync( 716 request_->url(), response_cookies_[response_cookies_save_index_], 717 options, callback); 718 } 719 ++response_cookies_save_index_; 720 } 721 } 722 723 save_next_cookie_running->data = false; 724 725 if (!callback_pending->data) { 726 response_cookies_.clear(); 727 response_cookies_save_index_ = 0; 728 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status 729 NotifyHeadersComplete(); 730 return; 731 } 732} 733 734// |save_next_cookie_running| is true when the callback is bound and set to 735// false when SaveNextCookie exits, allowing the callback to determine if the 736// save occurred synchronously or asynchronously. 737// |callback_pending| is false when the callback is invoked and will be set to 738// true by the callback, allowing SaveNextCookie to detect whether the save 739// occurred synchronously. 740// See SaveNextCookie() for more information. 
741void URLRequestHttpJob::OnCookieSaved( 742 scoped_refptr<SharedBoolean> save_next_cookie_running, 743 scoped_refptr<SharedBoolean> callback_pending, 744 bool cookie_status) { 745 callback_pending->data = false; 746 747 // If we were called synchronously, return. 748 if (save_next_cookie_running->data) { 749 return; 750 } 751 752 // We were called asynchronously, so trigger the next save. 753 // We may have been canceled within OnSetCookie. 754 if (GetStatus().is_success()) { 755 SaveNextCookie(); 756 } else { 757 NotifyCanceled(); 758 } 759} 760 761void URLRequestHttpJob::FetchResponseCookies( 762 std::vector<std::string>* cookies) { 763 const std::string name = "Set-Cookie"; 764 std::string value; 765 766 void* iter = NULL; 767 HttpResponseHeaders* headers = GetResponseHeaders(); 768 while (headers->EnumerateHeader(&iter, name, &value)) { 769 if (!value.empty()) 770 cookies->push_back(value); 771 } 772} 773 774// NOTE: |ProcessStrictTransportSecurityHeader| and 775// |ProcessPublicKeyPinsHeader| have very similar structures, by design. 776void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { 777 DCHECK(response_info_); 778 TransportSecurityState* security_state = 779 request_->context()->transport_security_state(); 780 const SSLInfo& ssl_info = response_info_->ssl_info; 781 782 // Only accept HSTS headers on HTTPS connections that have no 783 // certificate errors. 784 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 785 !security_state) 786 return; 787 788 // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec: 789 // 790 // If a UA receives more than one STS header field in a HTTP response 791 // message over secure transport, then the UA MUST process only the 792 // first such header field. 
793 HttpResponseHeaders* headers = GetResponseHeaders(); 794 std::string value; 795 if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value)) 796 security_state->AddHSTSHeader(request_info_.url.host(), value); 797} 798 799void URLRequestHttpJob::ProcessPublicKeyPinsHeader() { 800 DCHECK(response_info_); 801 TransportSecurityState* security_state = 802 request_->context()->transport_security_state(); 803 const SSLInfo& ssl_info = response_info_->ssl_info; 804 805 // Only accept HPKP headers on HTTPS connections that have no 806 // certificate errors. 807 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 808 !security_state) 809 return; 810 811 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning: 812 // 813 // If a UA receives more than one PKP header field in an HTTP 814 // response message over secure transport, then the UA MUST process 815 // only the first such header field. 816 HttpResponseHeaders* headers = GetResponseHeaders(); 817 std::string value; 818 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value)) 819 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info); 820} 821 822void URLRequestHttpJob::OnStartCompleted(int result) { 823 RecordTimer(); 824 825 // If the request was destroyed, then there is no more work to do. 826 if (!request_) 827 return; 828 829 // If the transaction was destroyed, then the job was cancelled, and 830 // we can just ignore this notification. 
831 if (!transaction_.get()) 832 return; 833 834 receive_headers_end_ = base::TimeTicks::Now(); 835 836 // Clear the IO_PENDING status 837 SetStatus(URLRequestStatus()); 838 839 const URLRequestContext* context = request_->context(); 840 841 if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN && 842 transaction_->GetResponseInfo() != NULL) { 843 FraudulentCertificateReporter* reporter = 844 context->fraudulent_certificate_reporter(); 845 if (reporter != NULL) { 846 const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info; 847 bool sni_available = SSLConfigService::IsSNIAvailable( 848 context->ssl_config_service()); 849 const std::string& host = request_->url().host(); 850 851 reporter->SendReport(host, ssl_info, sni_available); 852 } 853 } 854 855 if (result == OK) { 856 scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders(); 857 if (network_delegate()) { 858 // Note that |this| may not be deleted until 859 // |on_headers_received_callback_| or 860 // |NetworkDelegate::URLRequestDestroyed()| has been called. 861 int error = network_delegate()->NotifyHeadersReceived( 862 request_, 863 on_headers_received_callback_, 864 headers.get(), 865 &override_response_headers_); 866 if (error != net::OK) { 867 if (error == net::ERR_IO_PENDING) { 868 awaiting_callback_ = true; 869 SetBlockedOnDelegate(); 870 } else { 871 std::string source("delegate"); 872 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 873 NetLog::StringCallback("source", 874 &source)); 875 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error)); 876 } 877 return; 878 } 879 } 880 881 SaveCookiesAndNotifyHeadersComplete(net::OK); 882 } else if (IsCertificateError(result)) { 883 // We encountered an SSL certificate error. Ask our delegate to decide 884 // what we should do. 
885 886 TransportSecurityState::DomainState domain_state; 887 const URLRequestContext* context = request_->context(); 888 const bool fatal = context->transport_security_state() && 889 context->transport_security_state()->GetDomainState( 890 request_info_.url.host(), 891 SSLConfigService::IsSNIAvailable(context->ssl_config_service()), 892 &domain_state) && 893 domain_state.ShouldSSLErrorsBeFatal(); 894 NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal); 895 } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) { 896 NotifyCertificateRequested( 897 transaction_->GetResponseInfo()->cert_request_info.get()); 898 } else { 899 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 900 } 901} 902 903void URLRequestHttpJob::OnHeadersReceivedCallback(int result) { 904 SetUnblockedOnDelegate(); 905 awaiting_callback_ = false; 906 907 // Check that there are no callbacks to already canceled requests. 908 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); 909 910 SaveCookiesAndNotifyHeadersComplete(result); 911} 912 913void URLRequestHttpJob::OnReadCompleted(int result) { 914 read_in_progress_ = false; 915 916 if (ShouldFixMismatchedContentLength(result)) 917 result = OK; 918 919 if (result == OK) { 920 NotifyDone(URLRequestStatus()); 921 } else if (result < 0) { 922 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); 923 } else { 924 // Clear the IO_PENDING status 925 SetStatus(URLRequestStatus()); 926 } 927 928 NotifyReadComplete(result); 929} 930 931void URLRequestHttpJob::RestartTransactionWithAuth( 932 const AuthCredentials& credentials) { 933 auth_credentials_ = credentials; 934 935 // These will be reset in OnStartCompleted. 936 response_info_ = NULL; 937 receive_headers_end_ = base::TimeTicks(); 938 response_cookies_.clear(); 939 940 ResetTimer(); 941 942 // Update the cookies, since the cookie store may have been updated from the 943 // headers in the 401/407. 
Since cookies were already appended to 944 // extra_headers, we need to strip them out before adding them again. 945 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie); 946 947 AddCookieHeaderAndStart(); 948} 949 950void URLRequestHttpJob::SetUpload(UploadDataStream* upload) { 951 DCHECK(!transaction_.get()) << "cannot change once started"; 952 request_info_.upload_data_stream = upload; 953} 954 955void URLRequestHttpJob::SetExtraRequestHeaders( 956 const HttpRequestHeaders& headers) { 957 DCHECK(!transaction_.get()) << "cannot change once started"; 958 request_info_.extra_headers.CopyFrom(headers); 959} 960 961LoadState URLRequestHttpJob::GetLoadState() const { 962 return transaction_.get() ? 963 transaction_->GetLoadState() : LOAD_STATE_IDLE; 964} 965 966UploadProgress URLRequestHttpJob::GetUploadProgress() const { 967 return transaction_.get() ? 968 transaction_->GetUploadProgress() : UploadProgress(); 969} 970 971bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { 972 DCHECK(transaction_.get()); 973 974 if (!response_info_) 975 return false; 976 977 return GetResponseHeaders()->GetMimeType(mime_type); 978} 979 980bool URLRequestHttpJob::GetCharset(std::string* charset) { 981 DCHECK(transaction_.get()); 982 983 if (!response_info_) 984 return false; 985 986 return GetResponseHeaders()->GetCharset(charset); 987} 988 989void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { 990 DCHECK(request_); 991 DCHECK(transaction_.get()); 992 993 if (response_info_) { 994 *info = *response_info_; 995 if (override_response_headers_.get()) 996 info->headers = override_response_headers_; 997 } 998} 999 1000void URLRequestHttpJob::GetLoadTimingInfo( 1001 LoadTimingInfo* load_timing_info) const { 1002 // If haven't made it far enough to receive any headers, don't return 1003 // anything. This makes for more consistent behavior in the case of errors. 
1004 if (!transaction_ || receive_headers_end_.is_null()) 1005 return; 1006 if (transaction_->GetLoadTimingInfo(load_timing_info)) 1007 load_timing_info->receive_headers_end = receive_headers_end_; 1008} 1009 1010bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) { 1011 DCHECK(transaction_.get()); 1012 1013 if (!response_info_) 1014 return false; 1015 1016 // TODO(darin): Why are we extracting response cookies again? Perhaps we 1017 // should just leverage response_cookies_. 1018 1019 cookies->clear(); 1020 FetchResponseCookies(cookies); 1021 return true; 1022} 1023 1024int URLRequestHttpJob::GetResponseCode() const { 1025 DCHECK(transaction_.get()); 1026 1027 if (!response_info_) 1028 return -1; 1029 1030 return GetResponseHeaders()->response_code(); 1031} 1032 1033Filter* URLRequestHttpJob::SetupFilter() const { 1034 DCHECK(transaction_.get()); 1035 if (!response_info_) 1036 return NULL; 1037 1038 std::vector<Filter::FilterType> encoding_types; 1039 std::string encoding_type; 1040 HttpResponseHeaders* headers = GetResponseHeaders(); 1041 void* iter = NULL; 1042 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) { 1043 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); 1044 } 1045 1046 if (filter_context_->IsSdchResponse()) { 1047 // We are wary of proxies that discard or damage SDCH encoding. If a server 1048 // explicitly states that this is not SDCH content, then we can correct our 1049 // assumption that this is an SDCH response, and avoid the need to recover 1050 // as though the content is corrupted (when we discover it is not SDCH 1051 // encoded). 
1052 std::string sdch_response_status; 1053 iter = NULL; 1054 while (headers->EnumerateHeader(&iter, "X-Sdch-Encode", 1055 &sdch_response_status)) { 1056 if (sdch_response_status == "0") { 1057 filter_context_->ResetSdchResponseToFalse(); 1058 break; 1059 } 1060 } 1061 } 1062 1063 // Even if encoding types are empty, there is a chance that we need to add 1064 // some decoding, as some proxies strip encoding completely. In such cases, 1065 // we may need to add (for example) SDCH filtering (when the context suggests 1066 // it is appropriate). 1067 Filter::FixupEncodingTypes(*filter_context_, &encoding_types); 1068 1069 return !encoding_types.empty() 1070 ? Filter::Factory(encoding_types, *filter_context_) : NULL; 1071} 1072 1073bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { 1074 // HTTP is always safe. 1075 // TODO(pauljensen): Remove once crbug.com/146591 is fixed. 1076 if (location.is_valid() && 1077 (location.scheme() == "http" || location.scheme() == "https")) { 1078 return true; 1079 } 1080 // Query URLRequestJobFactory as to whether |location| would be safe to 1081 // redirect to. 1082 return request_->context()->job_factory() && 1083 request_->context()->job_factory()->IsSafeRedirectTarget(location); 1084} 1085 1086bool URLRequestHttpJob::NeedsAuth() { 1087 int code = GetResponseCode(); 1088 if (code == -1) 1089 return false; 1090 1091 // Check if we need either Proxy or WWW Authentication. This could happen 1092 // because we either provided no auth info, or provided incorrect info. 
1093 switch (code) { 1094 case 407: 1095 if (proxy_auth_state_ == AUTH_STATE_CANCELED) 1096 return false; 1097 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; 1098 return true; 1099 case 401: 1100 if (server_auth_state_ == AUTH_STATE_CANCELED) 1101 return false; 1102 server_auth_state_ = AUTH_STATE_NEED_AUTH; 1103 return true; 1104 } 1105 return false; 1106} 1107 1108void URLRequestHttpJob::GetAuthChallengeInfo( 1109 scoped_refptr<AuthChallengeInfo>* result) { 1110 DCHECK(transaction_.get()); 1111 DCHECK(response_info_); 1112 1113 // sanity checks: 1114 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || 1115 server_auth_state_ == AUTH_STATE_NEED_AUTH); 1116 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) || 1117 (GetResponseHeaders()->response_code() == 1118 HTTP_PROXY_AUTHENTICATION_REQUIRED)); 1119 1120 *result = response_info_->auth_challenge; 1121} 1122 1123void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) { 1124 DCHECK(transaction_.get()); 1125 1126 // Proxy gets set first, then WWW. 1127 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1128 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; 1129 } else { 1130 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1131 server_auth_state_ = AUTH_STATE_HAVE_AUTH; 1132 } 1133 1134 RestartTransactionWithAuth(credentials); 1135} 1136 1137void URLRequestHttpJob::CancelAuth() { 1138 // Proxy gets set first, then WWW. 1139 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1140 proxy_auth_state_ = AUTH_STATE_CANCELED; 1141 } else { 1142 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1143 server_auth_state_ = AUTH_STATE_CANCELED; 1144 } 1145 1146 // These will be reset in OnStartCompleted. 1147 response_info_ = NULL; 1148 receive_headers_end_ = base::TimeTicks::Now(); 1149 response_cookies_.clear(); 1150 1151 ResetTimer(); 1152 1153 // OK, let the consumer read the error page... 
1154 // 1155 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, 1156 // which will cause the consumer to receive OnResponseStarted instead of 1157 // OnAuthRequired. 1158 // 1159 // We have to do this via InvokeLater to avoid "recursing" the consumer. 1160 // 1161 base::MessageLoop::current()->PostTask( 1162 FROM_HERE, 1163 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1164 weak_factory_.GetWeakPtr(), OK)); 1165} 1166 1167void URLRequestHttpJob::ContinueWithCertificate( 1168 X509Certificate* client_cert) { 1169 DCHECK(transaction_.get()); 1170 1171 DCHECK(!response_info_) << "should not have a response yet"; 1172 receive_headers_end_ = base::TimeTicks(); 1173 1174 ResetTimer(); 1175 1176 // No matter what, we want to report our status as IO pending since we will 1177 // be notifying our consumer asynchronously via OnStartCompleted. 1178 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1179 1180 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_); 1181 if (rv == ERR_IO_PENDING) 1182 return; 1183 1184 // The transaction started synchronously, but we need to notify the 1185 // URLRequest delegate via the message loop. 1186 base::MessageLoop::current()->PostTask( 1187 FROM_HERE, 1188 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1189 weak_factory_.GetWeakPtr(), rv)); 1190} 1191 1192void URLRequestHttpJob::ContinueDespiteLastError() { 1193 // If the transaction was destroyed, then the job was cancelled. 1194 if (!transaction_.get()) 1195 return; 1196 1197 DCHECK(!response_info_) << "should not have a response yet"; 1198 receive_headers_end_ = base::TimeTicks(); 1199 1200 ResetTimer(); 1201 1202 // No matter what, we want to report our status as IO pending since we will 1203 // be notifying our consumer asynchronously via OnStartCompleted. 
1204 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1205 1206 int rv = transaction_->RestartIgnoringLastError(start_callback_); 1207 if (rv == ERR_IO_PENDING) 1208 return; 1209 1210 // The transaction started synchronously, but we need to notify the 1211 // URLRequest delegate via the message loop. 1212 base::MessageLoop::current()->PostTask( 1213 FROM_HERE, 1214 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1215 weak_factory_.GetWeakPtr(), rv)); 1216} 1217 1218bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { 1219 // Some servers send the body compressed, but specify the content length as 1220 // the uncompressed size. Although this violates the HTTP spec we want to 1221 // support it (as IE and FireFox do), but *only* for an exact match. 1222 // See http://crbug.com/79694. 1223 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH || 1224 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) { 1225 if (request_ && request_->response_headers()) { 1226 int64 expected_length = request_->response_headers()->GetContentLength(); 1227 VLOG(1) << __FUNCTION__ << "() " 1228 << "\"" << request_->url().spec() << "\"" 1229 << " content-length = " << expected_length 1230 << " pre total = " << prefilter_bytes_read() 1231 << " post total = " << postfilter_bytes_read(); 1232 if (postfilter_bytes_read() == expected_length) { 1233 // Clear the error. 
1234 return true; 1235 } 1236 } 1237 } 1238 return false; 1239} 1240 1241bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, 1242 int* bytes_read) { 1243 DCHECK_NE(buf_size, 0); 1244 DCHECK(bytes_read); 1245 DCHECK(!read_in_progress_); 1246 1247 int rv = transaction_->Read( 1248 buf, buf_size, 1249 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this))); 1250 1251 if (ShouldFixMismatchedContentLength(rv)) 1252 rv = 0; 1253 1254 if (rv >= 0) { 1255 *bytes_read = rv; 1256 if (!rv) 1257 DoneWithRequest(FINISHED); 1258 return true; 1259 } 1260 1261 if (rv == ERR_IO_PENDING) { 1262 read_in_progress_ = true; 1263 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1264 } else { 1265 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 1266 } 1267 1268 return false; 1269} 1270 1271void URLRequestHttpJob::StopCaching() { 1272 if (transaction_.get()) 1273 transaction_->StopCaching(); 1274} 1275 1276void URLRequestHttpJob::DoneReading() { 1277 if (transaction_.get()) 1278 transaction_->DoneReading(); 1279 DoneWithRequest(FINISHED); 1280} 1281 1282HostPortPair URLRequestHttpJob::GetSocketAddress() const { 1283 return response_info_ ? 
response_info_->socket_address : HostPortPair(); 1284} 1285 1286void URLRequestHttpJob::RecordTimer() { 1287 if (request_creation_time_.is_null()) { 1288 NOTREACHED() 1289 << "The same transaction shouldn't start twice without new timing."; 1290 return; 1291 } 1292 1293 base::TimeDelta to_start = base::Time::Now() - request_creation_time_; 1294 request_creation_time_ = base::Time(); 1295 1296 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); 1297} 1298 1299void URLRequestHttpJob::ResetTimer() { 1300 if (!request_creation_time_.is_null()) { 1301 NOTREACHED() 1302 << "The timer was reset before it was recorded."; 1303 return; 1304 } 1305 request_creation_time_ = base::Time::Now(); 1306} 1307 1308void URLRequestHttpJob::UpdatePacketReadTimes() { 1309 if (!packet_timing_enabled_) 1310 return; 1311 1312 if (filter_input_byte_count() <= bytes_observed_in_packets_) { 1313 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); 1314 return; // No new bytes have arrived. 1315 } 1316 1317 final_packet_time_ = base::Time::Now(); 1318 if (!bytes_observed_in_packets_) 1319 request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); 1320 1321 bytes_observed_in_packets_ = filter_input_byte_count(); 1322} 1323 1324void URLRequestHttpJob::RecordPacketStats( 1325 FilterContext::StatisticSelector statistic) const { 1326 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) 1327 return; 1328 1329 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; 1330 switch (statistic) { 1331 case FilterContext::SDCH_DECODE: { 1332 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", 1333 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); 1334 return; 1335 } 1336 case FilterContext::SDCH_PASSTHROUGH: { 1337 // Despite advertising a dictionary, we handled non-sdch compressed 1338 // content. 
1339 return; 1340 } 1341 1342 case FilterContext::SDCH_EXPERIMENT_DECODE: { 1343 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode", 1344 duration, 1345 base::TimeDelta::FromMilliseconds(20), 1346 base::TimeDelta::FromMinutes(10), 100); 1347 return; 1348 } 1349 case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { 1350 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback", 1351 duration, 1352 base::TimeDelta::FromMilliseconds(20), 1353 base::TimeDelta::FromMinutes(10), 100); 1354 return; 1355 } 1356 default: 1357 NOTREACHED(); 1358 return; 1359 } 1360} 1361 1362// The common type of histogram we use for all compression-tracking histograms. 1363#define COMPRESSION_HISTOGRAM(name, sample) \ 1364 do { \ 1365 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \ 1366 500, 1000000, 100); \ 1367 } while (0) 1368 1369void URLRequestHttpJob::RecordCompressionHistograms() { 1370 DCHECK(request_); 1371 if (!request_) 1372 return; 1373 1374 if (is_cached_content_ || // Don't record cached content 1375 !GetStatus().is_success() || // Don't record failed content 1376 !IsCompressibleContent() || // Only record compressible content 1377 !prefilter_bytes_read()) // Zero-byte responses aren't useful. 1378 return; 1379 1380 // Miniature requests aren't really compressible. Don't count them. 1381 const int kMinSize = 16; 1382 if (prefilter_bytes_read() < kMinSize) 1383 return; 1384 1385 // Only record for http or https urls. 1386 bool is_http = request_->url().SchemeIs("http"); 1387 bool is_https = request_->url().SchemeIs("https"); 1388 if (!is_http && !is_https) 1389 return; 1390 1391 int compressed_B = prefilter_bytes_read(); 1392 int decompressed_B = postfilter_bytes_read(); 1393 bool was_filtered = HasFilter(); 1394 1395 // We want to record how often downloaded resources are compressed. 1396 // But, we recognize that different protocols may have different 1397 // properties. 
So, for each request, we'll put it into one of 3 1398 // groups: 1399 // a) SSL resources 1400 // Proxies cannot tamper with compression headers with SSL. 1401 // b) Non-SSL, loaded-via-proxy resources 1402 // In this case, we know a proxy might have interfered. 1403 // c) Non-SSL, loaded-without-proxy resources 1404 // In this case, we know there was no explicit proxy. However, 1405 // it is possible that a transparent proxy was still interfering. 1406 // 1407 // For each group, we record the same 3 histograms. 1408 1409 if (is_https) { 1410 if (was_filtered) { 1411 COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); 1412 COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); 1413 } else { 1414 COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); 1415 } 1416 return; 1417 } 1418 1419 if (request_->was_fetched_via_proxy()) { 1420 if (was_filtered) { 1421 COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); 1422 COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); 1423 } else { 1424 COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); 1425 } 1426 return; 1427 } 1428 1429 if (was_filtered) { 1430 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); 1431 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); 1432 } else { 1433 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); 1434 } 1435} 1436 1437bool URLRequestHttpJob::IsCompressibleContent() const { 1438 std::string mime_type; 1439 return GetMimeType(&mime_type) && 1440 (IsSupportedJavascriptMimeType(mime_type.c_str()) || 1441 IsSupportedNonImageMimeType(mime_type.c_str())); 1442} 1443 1444void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) { 1445 if (start_time_.is_null()) 1446 return; 1447 1448 base::TimeDelta total_time = base::TimeTicks::Now() - start_time_; 1449 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time); 1450 1451 if 
(reason == FINISHED) { 1452 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time); 1453 } else { 1454 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time); 1455 } 1456 1457 if (response_info_) { 1458 if (response_info_->was_cached) { 1459 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time); 1460 } else { 1461 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time); 1462 } 1463 } 1464 1465 start_time_ = base::TimeTicks(); 1466} 1467 1468void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { 1469 if (done_) 1470 return; 1471 done_ = true; 1472 RecordPerfHistograms(reason); 1473 if (reason == FINISHED) { 1474 request_->set_received_response_content_length(prefilter_bytes_read()); 1475 RecordCompressionHistograms(); 1476 } 1477} 1478 1479HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const { 1480 DCHECK(transaction_.get()); 1481 DCHECK(transaction_->GetResponseInfo()); 1482 return override_response_headers_.get() ? 1483 override_response_headers_.get() : 1484 transaction_->GetResponseInfo()->headers.get(); 1485} 1486 1487void URLRequestHttpJob::NotifyURLRequestDestroyed() { 1488 awaiting_callback_ = false; 1489} 1490 1491void URLRequestHttpJob::OnDetachRequest() { 1492 http_transaction_delegate_->OnDetachRequest(); 1493} 1494 1495} // namespace net 1496