url_request_http_job.cc revision 1e9bf3e0803691d0a228da41fc608347b6db4340
// url_request_http_job.cc revision 1e9bf3e0803691d0a228da41fc608347b6db4340
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_version_info.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/filter.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/cert/cert_status_flags.h"
#include "net/cookies/cookie_monster.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_delegate.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/ssl/ssl_cert_request_info.h"
#include "net/ssl/ssl_config_service.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"

// Request header used to advertise the SDCH dictionaries available to the
// server for this request (see AddExtraHeaders below).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

// Adapter that exposes the state of the owning URLRequestHttpJob to the
// content-decoding Filter machinery via the FilterContext interface.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  // Non-owning back-pointer to the job; the job owns this context, so it
  // outlives us.
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

// Forwards cache/network activity notifications from the HttpTransaction to
// the NetworkDelegate as REQUEST_WAIT_STATE_* transitions, with internal
// state tracking to sanity-check the start/finish pairing.
class URLRequestHttpJob::HttpTransactionDelegateImpl
    : public HttpTransactionDelegate {
 public:
  HttpTransactionDelegateImpl(URLRequest* request,
                              NetworkDelegate* network_delegate)
      : request_(request),
        network_delegate_(network_delegate),
        state_(NONE_ACTIVE) {}
  virtual ~HttpTransactionDelegateImpl() { OnDetachRequest(); }
  // Called when the job detaches from the request (e.g. Kill or
  // destruction); resets the wait state and drops the request pointer so no
  // further notifications are sent.
  void OnDetachRequest() {
    if (!IsRequestAndDelegateActive())
      return;
    NotifyStateChange(NetworkDelegate::REQUEST_WAIT_STATE_RESET);
    state_ = NONE_ACTIVE;
    request_ = NULL;
  }
  virtual void OnCacheActionStart() OVERRIDE {
    HandleStateChange(NONE_ACTIVE,
                      CACHE_ACTIVE,
                      NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
  }
  virtual void OnCacheActionFinish() OVERRIDE {
    HandleStateChange(CACHE_ACTIVE,
                      NONE_ACTIVE,
                      NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
  }
  virtual void OnNetworkActionStart() OVERRIDE {
    HandleStateChange(NONE_ACTIVE,
                      NETWORK_ACTIVE,
                      NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
  }
  virtual void OnNetworkActionFinish() OVERRIDE {
    HandleStateChange(NETWORK_ACTIVE,
                      NONE_ACTIVE,
                      NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
  }

 private:
  enum State {
    NONE_ACTIVE,
    CACHE_ACTIVE,
    NETWORK_ACTIVE
  };

  // Returns true if this object still has an active request and network
  // delegate.
  bool IsRequestAndDelegateActive() const {
    return request_ && network_delegate_;
  }

  // Notifies the |network_delegate_| object of a change in the state of the
  // |request_| to the state given by the |request_wait_state| argument.
  void NotifyStateChange(NetworkDelegate::RequestWaitState request_wait_state) {
    network_delegate_->NotifyRequestWaitStateChange(*request_,
                                                    request_wait_state);
  }

  // Checks the request and delegate are still active, changes |state_| from
  // |expected_state| to |next_state|, and then notifies the network delegate of
  // the change to |request_wait_state|.
  void HandleStateChange(State expected_state,
                         State next_state,
                         NetworkDelegate::RequestWaitState request_wait_state) {
    if (!IsRequestAndDelegateActive())
      return;
    DCHECK_EQ(expected_state, state_);
    state_ = next_state;
    NotifyStateChange(request_wait_state);
  }

  URLRequest* request_;
  NetworkDelegate* network_delegate_;
  // Internal state tracking, for sanity checking.
  State state_;

  DISALLOW_COPY_AND_ASSIGN(HttpTransactionDelegateImpl);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  // The request may already have been detached from the job.
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// TODO(darin): make sure the port blocking code is not lost
// Creates the job for an http(s) request. Returns an error job if the
// context has no transaction factory, or a redirect job if HSTS requires
// upgrading the request to https.
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      weak_factory_(this),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_transaction_delegate_(
          new HttpTransactionDelegateImpl(request, network_delegate)),
      http_user_agent_settings_(http_user_agent_settings) {
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  // A request can be in the SDCH experiment's control arm or its activated
  // arm, never both; cached responses are excluded from the stats.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}

void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  // Propagate immediately if the transaction already exists; otherwise the
  // stored priority is used when the transaction is created.
  if (transaction_)
    transaction_->SetPriority(priority_);
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      kPrivacyModeEnabled : kPrivacyModeDisabled;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent(request_->url()) :
          EmptyString());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::Kill() {
  http_transaction_delegate_->OnDetachRequest();

  if (!transaction_.get())
    return;

  // Invalidate outstanding weak callbacks so no transaction/cookie-store
  // completion fires after the job is killed.
  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  // |response_info_| points into the transaction, so it must be cleared with
  // it.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}

void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_, http_transaction_delegate_.get());
    if (rv == OK) {
      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      // No CookieMonster available; run the policy check with an empty list.
      CheckCookiePolicyAndLoad(CookieList());
    }
  } else {
    DoStartTransaction();
  }
}

void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
    // Disable privacy mode as we are sending cookies anyway.
    request_info_.privacy_mode = kPrivacyModeDisabled;
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // The server's Date header is used for cookie expiration clock-skew
  // correction; fall back to a null Time if absent/unparsable.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}

void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HPKP headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
  //
  //   If a UA receives more than one PKP header field in an HTTP
  //   response message over secure transport, then the UA MUST process
  //   only the first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
}

void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  receive_headers_end_ = base::TimeTicks::Now();

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      OnCallToDelegate();
      int error = network_delegate()->NotifyHeadersReceived(
          request_,
          on_headers_received_callback_,
          headers.get(),
          &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          OnCallToDelegateComplete();
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error.
    if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY ||
        result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) {
      // These are hard failures. They're handled separately and don't have
      // the correct cert status, so set it here.
      SSLInfo info(transaction_->GetResponseInfo()->ssl_info);
      info.cert_status = MapNetErrorToCertStatus(result);
      NotifySSLCertificateError(info, true);
    } else {
      // Maybe overridable, maybe not. Ask the delegate to decide.
      TransportSecurityState::DomainState domain_state;
      const URLRequestContext* context = request_->context();
      const bool fatal = context->transport_security_state() &&
          context->transport_security_state()->GetDomainState(
              request_info_.url.host(),
              SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
              &domain_state) &&
          domain_state.ShouldSSLErrorsBeFatal();
      NotifySSLCertificateError(
          transaction_->GetResponseInfo()->ssl_info, fatal);
    }
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info.get());
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
987 transaction_->GetUploadProgress() : UploadProgress(); 988} 989 990bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { 991 DCHECK(transaction_.get()); 992 993 if (!response_info_) 994 return false; 995 996 return GetResponseHeaders()->GetMimeType(mime_type); 997} 998 999bool URLRequestHttpJob::GetCharset(std::string* charset) { 1000 DCHECK(transaction_.get()); 1001 1002 if (!response_info_) 1003 return false; 1004 1005 return GetResponseHeaders()->GetCharset(charset); 1006} 1007 1008void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { 1009 DCHECK(request_); 1010 DCHECK(transaction_.get()); 1011 1012 if (response_info_) { 1013 *info = *response_info_; 1014 if (override_response_headers_.get()) 1015 info->headers = override_response_headers_; 1016 } 1017} 1018 1019void URLRequestHttpJob::GetLoadTimingInfo( 1020 LoadTimingInfo* load_timing_info) const { 1021 // If haven't made it far enough to receive any headers, don't return 1022 // anything. This makes for more consistent behavior in the case of errors. 1023 if (!transaction_ || receive_headers_end_.is_null()) 1024 return; 1025 if (transaction_->GetLoadTimingInfo(load_timing_info)) 1026 load_timing_info->receive_headers_end = receive_headers_end_; 1027} 1028 1029bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) { 1030 DCHECK(transaction_.get()); 1031 1032 if (!response_info_) 1033 return false; 1034 1035 // TODO(darin): Why are we extracting response cookies again? Perhaps we 1036 // should just leverage response_cookies_. 
1037 1038 cookies->clear(); 1039 FetchResponseCookies(cookies); 1040 return true; 1041} 1042 1043int URLRequestHttpJob::GetResponseCode() const { 1044 DCHECK(transaction_.get()); 1045 1046 if (!response_info_) 1047 return -1; 1048 1049 return GetResponseHeaders()->response_code(); 1050} 1051 1052Filter* URLRequestHttpJob::SetupFilter() const { 1053 DCHECK(transaction_.get()); 1054 if (!response_info_) 1055 return NULL; 1056 1057 std::vector<Filter::FilterType> encoding_types; 1058 std::string encoding_type; 1059 HttpResponseHeaders* headers = GetResponseHeaders(); 1060 void* iter = NULL; 1061 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) { 1062 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); 1063 } 1064 1065 if (filter_context_->IsSdchResponse()) { 1066 // We are wary of proxies that discard or damage SDCH encoding. If a server 1067 // explicitly states that this is not SDCH content, then we can correct our 1068 // assumption that this is an SDCH response, and avoid the need to recover 1069 // as though the content is corrupted (when we discover it is not SDCH 1070 // encoded). 1071 std::string sdch_response_status; 1072 iter = NULL; 1073 while (headers->EnumerateHeader(&iter, "X-Sdch-Encode", 1074 &sdch_response_status)) { 1075 if (sdch_response_status == "0") { 1076 filter_context_->ResetSdchResponseToFalse(); 1077 break; 1078 } 1079 } 1080 } 1081 1082 // Even if encoding types are empty, there is a chance that we need to add 1083 // some decoding, as some proxies strip encoding completely. In such cases, 1084 // we may need to add (for example) SDCH filtering (when the context suggests 1085 // it is appropriate). 1086 Filter::FixupEncodingTypes(*filter_context_, &encoding_types); 1087 1088 return !encoding_types.empty() 1089 ? Filter::Factory(encoding_types, *filter_context_) : NULL; 1090} 1091 1092bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { 1093 // HTTP is always safe. 
1094 // TODO(pauljensen): Remove once crbug.com/146591 is fixed. 1095 if (location.is_valid() && 1096 (location.scheme() == "http" || location.scheme() == "https")) { 1097 return true; 1098 } 1099 // Query URLRequestJobFactory as to whether |location| would be safe to 1100 // redirect to. 1101 return request_->context()->job_factory() && 1102 request_->context()->job_factory()->IsSafeRedirectTarget(location); 1103} 1104 1105bool URLRequestHttpJob::NeedsAuth() { 1106 int code = GetResponseCode(); 1107 if (code == -1) 1108 return false; 1109 1110 // Check if we need either Proxy or WWW Authentication. This could happen 1111 // because we either provided no auth info, or provided incorrect info. 1112 switch (code) { 1113 case 407: 1114 if (proxy_auth_state_ == AUTH_STATE_CANCELED) 1115 return false; 1116 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; 1117 return true; 1118 case 401: 1119 if (server_auth_state_ == AUTH_STATE_CANCELED) 1120 return false; 1121 server_auth_state_ = AUTH_STATE_NEED_AUTH; 1122 return true; 1123 } 1124 return false; 1125} 1126 1127void URLRequestHttpJob::GetAuthChallengeInfo( 1128 scoped_refptr<AuthChallengeInfo>* result) { 1129 DCHECK(transaction_.get()); 1130 DCHECK(response_info_); 1131 1132 // sanity checks: 1133 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || 1134 server_auth_state_ == AUTH_STATE_NEED_AUTH); 1135 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) || 1136 (GetResponseHeaders()->response_code() == 1137 HTTP_PROXY_AUTHENTICATION_REQUIRED)); 1138 1139 *result = response_info_->auth_challenge; 1140} 1141 1142void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) { 1143 DCHECK(transaction_.get()); 1144 1145 // Proxy gets set first, then WWW. 
1146 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1147 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; 1148 } else { 1149 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1150 server_auth_state_ = AUTH_STATE_HAVE_AUTH; 1151 } 1152 1153 RestartTransactionWithAuth(credentials); 1154} 1155 1156void URLRequestHttpJob::CancelAuth() { 1157 // Proxy gets set first, then WWW. 1158 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1159 proxy_auth_state_ = AUTH_STATE_CANCELED; 1160 } else { 1161 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1162 server_auth_state_ = AUTH_STATE_CANCELED; 1163 } 1164 1165 // These will be reset in OnStartCompleted. 1166 response_info_ = NULL; 1167 receive_headers_end_ = base::TimeTicks::Now(); 1168 response_cookies_.clear(); 1169 1170 ResetTimer(); 1171 1172 // OK, let the consumer read the error page... 1173 // 1174 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, 1175 // which will cause the consumer to receive OnResponseStarted instead of 1176 // OnAuthRequired. 1177 // 1178 // We have to do this via InvokeLater to avoid "recursing" the consumer. 1179 // 1180 base::MessageLoop::current()->PostTask( 1181 FROM_HERE, 1182 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1183 weak_factory_.GetWeakPtr(), OK)); 1184} 1185 1186void URLRequestHttpJob::ContinueWithCertificate( 1187 X509Certificate* client_cert) { 1188 DCHECK(transaction_.get()); 1189 1190 DCHECK(!response_info_) << "should not have a response yet"; 1191 receive_headers_end_ = base::TimeTicks(); 1192 1193 ResetTimer(); 1194 1195 // No matter what, we want to report our status as IO pending since we will 1196 // be notifying our consumer asynchronously via OnStartCompleted. 
1197 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1198 1199 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_); 1200 if (rv == ERR_IO_PENDING) 1201 return; 1202 1203 // The transaction started synchronously, but we need to notify the 1204 // URLRequest delegate via the message loop. 1205 base::MessageLoop::current()->PostTask( 1206 FROM_HERE, 1207 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1208 weak_factory_.GetWeakPtr(), rv)); 1209} 1210 1211void URLRequestHttpJob::ContinueDespiteLastError() { 1212 // If the transaction was destroyed, then the job was cancelled. 1213 if (!transaction_.get()) 1214 return; 1215 1216 DCHECK(!response_info_) << "should not have a response yet"; 1217 receive_headers_end_ = base::TimeTicks(); 1218 1219 ResetTimer(); 1220 1221 // No matter what, we want to report our status as IO pending since we will 1222 // be notifying our consumer asynchronously via OnStartCompleted. 1223 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1224 1225 int rv = transaction_->RestartIgnoringLastError(start_callback_); 1226 if (rv == ERR_IO_PENDING) 1227 return; 1228 1229 // The transaction started synchronously, but we need to notify the 1230 // URLRequest delegate via the message loop. 1231 base::MessageLoop::current()->PostTask( 1232 FROM_HERE, 1233 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1234 weak_factory_.GetWeakPtr(), rv)); 1235} 1236 1237bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { 1238 // Some servers send the body compressed, but specify the content length as 1239 // the uncompressed size. Although this violates the HTTP spec we want to 1240 // support it (as IE and FireFox do), but *only* for an exact match. 1241 // See http://crbug.com/79694. 
1242 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH || 1243 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) { 1244 if (request_ && request_->response_headers()) { 1245 int64 expected_length = request_->response_headers()->GetContentLength(); 1246 VLOG(1) << __FUNCTION__ << "() " 1247 << "\"" << request_->url().spec() << "\"" 1248 << " content-length = " << expected_length 1249 << " pre total = " << prefilter_bytes_read() 1250 << " post total = " << postfilter_bytes_read(); 1251 if (postfilter_bytes_read() == expected_length) { 1252 // Clear the error. 1253 return true; 1254 } 1255 } 1256 } 1257 return false; 1258} 1259 1260bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, 1261 int* bytes_read) { 1262 DCHECK_NE(buf_size, 0); 1263 DCHECK(bytes_read); 1264 DCHECK(!read_in_progress_); 1265 1266 int rv = transaction_->Read( 1267 buf, buf_size, 1268 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this))); 1269 1270 if (ShouldFixMismatchedContentLength(rv)) 1271 rv = 0; 1272 1273 if (rv >= 0) { 1274 *bytes_read = rv; 1275 if (!rv) 1276 DoneWithRequest(FINISHED); 1277 return true; 1278 } 1279 1280 if (rv == ERR_IO_PENDING) { 1281 read_in_progress_ = true; 1282 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1283 } else { 1284 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 1285 } 1286 1287 return false; 1288} 1289 1290void URLRequestHttpJob::StopCaching() { 1291 if (transaction_.get()) 1292 transaction_->StopCaching(); 1293} 1294 1295bool URLRequestHttpJob::GetFullRequestHeaders( 1296 HttpRequestHeaders* headers) const { 1297 if (!transaction_) 1298 return false; 1299 1300 return transaction_->GetFullRequestHeaders(headers); 1301} 1302 1303void URLRequestHttpJob::DoneReading() { 1304 if (transaction_.get()) 1305 transaction_->DoneReading(); 1306 DoneWithRequest(FINISHED); 1307} 1308 1309HostPortPair URLRequestHttpJob::GetSocketAddress() const { 1310 return response_info_ ? 
response_info_->socket_address : HostPortPair(); 1311} 1312 1313void URLRequestHttpJob::RecordTimer() { 1314 if (request_creation_time_.is_null()) { 1315 NOTREACHED() 1316 << "The same transaction shouldn't start twice without new timing."; 1317 return; 1318 } 1319 1320 base::TimeDelta to_start = base::Time::Now() - request_creation_time_; 1321 request_creation_time_ = base::Time(); 1322 1323 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); 1324} 1325 1326void URLRequestHttpJob::ResetTimer() { 1327 if (!request_creation_time_.is_null()) { 1328 NOTREACHED() 1329 << "The timer was reset before it was recorded."; 1330 return; 1331 } 1332 request_creation_time_ = base::Time::Now(); 1333} 1334 1335void URLRequestHttpJob::UpdatePacketReadTimes() { 1336 if (!packet_timing_enabled_) 1337 return; 1338 1339 if (filter_input_byte_count() <= bytes_observed_in_packets_) { 1340 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); 1341 return; // No new bytes have arrived. 1342 } 1343 1344 final_packet_time_ = base::Time::Now(); 1345 if (!bytes_observed_in_packets_) 1346 request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); 1347 1348 bytes_observed_in_packets_ = filter_input_byte_count(); 1349} 1350 1351void URLRequestHttpJob::RecordPacketStats( 1352 FilterContext::StatisticSelector statistic) const { 1353 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) 1354 return; 1355 1356 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; 1357 switch (statistic) { 1358 case FilterContext::SDCH_DECODE: { 1359 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", 1360 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); 1361 return; 1362 } 1363 case FilterContext::SDCH_PASSTHROUGH: { 1364 // Despite advertising a dictionary, we handled non-sdch compressed 1365 // content. 
1366 return; 1367 } 1368 1369 case FilterContext::SDCH_EXPERIMENT_DECODE: { 1370 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode", 1371 duration, 1372 base::TimeDelta::FromMilliseconds(20), 1373 base::TimeDelta::FromMinutes(10), 100); 1374 return; 1375 } 1376 case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { 1377 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback", 1378 duration, 1379 base::TimeDelta::FromMilliseconds(20), 1380 base::TimeDelta::FromMinutes(10), 100); 1381 return; 1382 } 1383 default: 1384 NOTREACHED(); 1385 return; 1386 } 1387} 1388 1389// The common type of histogram we use for all compression-tracking histograms. 1390#define COMPRESSION_HISTOGRAM(name, sample) \ 1391 do { \ 1392 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \ 1393 500, 1000000, 100); \ 1394 } while (0) 1395 1396void URLRequestHttpJob::RecordCompressionHistograms() { 1397 DCHECK(request_); 1398 if (!request_) 1399 return; 1400 1401 if (is_cached_content_ || // Don't record cached content 1402 !GetStatus().is_success() || // Don't record failed content 1403 !IsCompressibleContent() || // Only record compressible content 1404 !prefilter_bytes_read()) // Zero-byte responses aren't useful. 1405 return; 1406 1407 // Miniature requests aren't really compressible. Don't count them. 1408 const int kMinSize = 16; 1409 if (prefilter_bytes_read() < kMinSize) 1410 return; 1411 1412 // Only record for http or https urls. 1413 bool is_http = request_->url().SchemeIs("http"); 1414 bool is_https = request_->url().SchemeIs("https"); 1415 if (!is_http && !is_https) 1416 return; 1417 1418 int compressed_B = prefilter_bytes_read(); 1419 int decompressed_B = postfilter_bytes_read(); 1420 bool was_filtered = HasFilter(); 1421 1422 // We want to record how often downloaded resources are compressed. 1423 // But, we recognize that different protocols may have different 1424 // properties. 
So, for each request, we'll put it into one of 3 1425 // groups: 1426 // a) SSL resources 1427 // Proxies cannot tamper with compression headers with SSL. 1428 // b) Non-SSL, loaded-via-proxy resources 1429 // In this case, we know a proxy might have interfered. 1430 // c) Non-SSL, loaded-without-proxy resources 1431 // In this case, we know there was no explicit proxy. However, 1432 // it is possible that a transparent proxy was still interfering. 1433 // 1434 // For each group, we record the same 3 histograms. 1435 1436 if (is_https) { 1437 if (was_filtered) { 1438 COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); 1439 COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); 1440 } else { 1441 COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); 1442 } 1443 return; 1444 } 1445 1446 if (request_->was_fetched_via_proxy()) { 1447 if (was_filtered) { 1448 COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); 1449 COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); 1450 } else { 1451 COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); 1452 } 1453 return; 1454 } 1455 1456 if (was_filtered) { 1457 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); 1458 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); 1459 } else { 1460 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); 1461 } 1462} 1463 1464bool URLRequestHttpJob::IsCompressibleContent() const { 1465 std::string mime_type; 1466 return GetMimeType(&mime_type) && 1467 (IsSupportedJavascriptMimeType(mime_type.c_str()) || 1468 IsSupportedNonImageMimeType(mime_type.c_str())); 1469} 1470 1471void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) { 1472 if (start_time_.is_null()) 1473 return; 1474 1475 base::TimeDelta total_time = base::TimeTicks::Now() - start_time_; 1476 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time); 1477 1478 if 
(reason == FINISHED) { 1479 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time); 1480 } else { 1481 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time); 1482 } 1483 1484 if (response_info_) { 1485 if (response_info_->was_cached) { 1486 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time); 1487 } else { 1488 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time); 1489 } 1490 } 1491 1492 start_time_ = base::TimeTicks(); 1493} 1494 1495void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { 1496 if (done_) 1497 return; 1498 done_ = true; 1499 RecordPerfHistograms(reason); 1500 if (reason == FINISHED) { 1501 request_->set_received_response_content_length(prefilter_bytes_read()); 1502 RecordCompressionHistograms(); 1503 } 1504} 1505 1506HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const { 1507 DCHECK(transaction_.get()); 1508 DCHECK(transaction_->GetResponseInfo()); 1509 return override_response_headers_.get() ? 1510 override_response_headers_.get() : 1511 transaction_->GetResponseInfo()->headers.get(); 1512} 1513 1514void URLRequestHttpJob::NotifyURLRequestDestroyed() { 1515 awaiting_callback_ = false; 1516} 1517 1518void URLRequestHttpJob::OnDetachRequest() { 1519 http_transaction_delegate_->OnDetachRequest(); 1520} 1521 1522} // namespace net 1523