// url_request_http_job.cc revision c2e0dbddbe15c98d52c4786dac06cb8952a8ae6d
1// Copyright (c) 2012 The Chromium Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include "net/url_request/url_request_http_job.h" 6 7#include "base/base_switches.h" 8#include "base/bind.h" 9#include "base/bind_helpers.h" 10#include "base/command_line.h" 11#include "base/compiler_specific.h" 12#include "base/file_util.h" 13#include "base/file_version_info.h" 14#include "base/message_loop.h" 15#include "base/metrics/field_trial.h" 16#include "base/metrics/histogram.h" 17#include "base/rand_util.h" 18#include "base/string_util.h" 19#include "base/time.h" 20#include "net/base/filter.h" 21#include "net/base/host_port_pair.h" 22#include "net/base/load_flags.h" 23#include "net/base/mime_util.h" 24#include "net/base/net_errors.h" 25#include "net/base/net_util.h" 26#include "net/base/network_delegate.h" 27#include "net/base/sdch_manager.h" 28#include "net/cert/cert_status_flags.h" 29#include "net/cookies/cookie_monster.h" 30#include "net/http/http_network_session.h" 31#include "net/http/http_request_headers.h" 32#include "net/http/http_response_headers.h" 33#include "net/http/http_response_info.h" 34#include "net/http/http_status_code.h" 35#include "net/http/http_transaction.h" 36#include "net/http/http_transaction_delegate.h" 37#include "net/http/http_transaction_factory.h" 38#include "net/http/http_util.h" 39#include "net/ssl/ssl_cert_request_info.h" 40#include "net/ssl/ssl_config_service.h" 41#include "net/url_request/fraudulent_certificate_reporter.h" 42#include "net/url_request/http_user_agent_settings.h" 43#include "net/url_request/url_request.h" 44#include "net/url_request/url_request_context.h" 45#include "net/url_request/url_request_error_job.h" 46#include "net/url_request/url_request_redirect_job.h" 47#include "net/url_request/url_request_throttler_header_adapter.h" 48#include "net/url_request/url_request_throttler_manager.h" 49 50static const char 
kAvailDictionaryHeader[] = "Avail-Dictionary"; 51 52namespace net { 53 54class URLRequestHttpJob::HttpFilterContext : public FilterContext { 55 public: 56 explicit HttpFilterContext(URLRequestHttpJob* job); 57 virtual ~HttpFilterContext(); 58 59 // FilterContext implementation. 60 virtual bool GetMimeType(std::string* mime_type) const OVERRIDE; 61 virtual bool GetURL(GURL* gurl) const OVERRIDE; 62 virtual base::Time GetRequestTime() const OVERRIDE; 63 virtual bool IsCachedContent() const OVERRIDE; 64 virtual bool IsDownload() const OVERRIDE; 65 virtual bool IsSdchResponse() const OVERRIDE; 66 virtual int64 GetByteReadCount() const OVERRIDE; 67 virtual int GetResponseCode() const OVERRIDE; 68 virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE; 69 70 // Method to allow us to reset filter context for a response that should have 71 // been SDCH encoded when there is an update due to an explicit HTTP header. 72 void ResetSdchResponseToFalse(); 73 74 private: 75 URLRequestHttpJob* job_; 76 77 DISALLOW_COPY_AND_ASSIGN(HttpFilterContext); 78}; 79 80class URLRequestHttpJob::HttpTransactionDelegateImpl 81 : public HttpTransactionDelegate { 82 public: 83 HttpTransactionDelegateImpl( 84 URLRequest* request, NetworkDelegate* network_delegate) 85 : request_(request), 86 network_delegate_(network_delegate), 87 cache_active_(false), 88 network_active_(false) { 89 } 90 virtual ~HttpTransactionDelegateImpl() { 91 OnDetachRequest(); 92 } 93 void OnDetachRequest() { 94 if (request_ == NULL || network_delegate_ == NULL) 95 return; 96 network_delegate_->NotifyRequestWaitStateChange( 97 *request_, 98 NetworkDelegate::REQUEST_WAIT_STATE_RESET); 99 cache_active_ = false; 100 network_active_ = false; 101 request_ = NULL; 102 } 103 virtual void OnCacheActionStart() OVERRIDE { 104 if (request_ == NULL || network_delegate_ == NULL) 105 return; 106 DCHECK(!cache_active_ && !network_active_); 107 cache_active_ = true; 108 network_delegate_->NotifyRequestWaitStateChange( 
109 *request_, 110 NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START); 111 } 112 virtual void OnCacheActionFinish() OVERRIDE { 113 if (request_ == NULL || network_delegate_ == NULL) 114 return; 115 DCHECK(cache_active_ && !network_active_); 116 cache_active_ = false; 117 network_delegate_->NotifyRequestWaitStateChange( 118 *request_, 119 NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH); 120 } 121 virtual void OnNetworkActionStart() OVERRIDE { 122 if (request_ == NULL || network_delegate_ == NULL) 123 return; 124 DCHECK(!cache_active_ && !network_active_); 125 network_active_ = true; 126 network_delegate_->NotifyRequestWaitStateChange( 127 *request_, 128 NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START); 129 } 130 virtual void OnNetworkActionFinish() OVERRIDE { 131 if (request_ == NULL || network_delegate_ == NULL) 132 return; 133 DCHECK(!cache_active_ && network_active_); 134 network_active_ = false; 135 network_delegate_->NotifyRequestWaitStateChange( 136 *request_, 137 NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH); 138 } 139 private: 140 URLRequest* request_; 141 NetworkDelegate* network_delegate_; 142 bool cache_active_; 143 bool network_active_; 144}; 145 146URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job) 147 : job_(job) { 148 DCHECK(job_); 149} 150 151URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() { 152} 153 154bool URLRequestHttpJob::HttpFilterContext::GetMimeType( 155 std::string* mime_type) const { 156 return job_->GetMimeType(mime_type); 157} 158 159bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const { 160 if (!job_->request()) 161 return false; 162 *gurl = job_->request()->url(); 163 return true; 164} 165 166base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const { 167 return job_->request() ? 
job_->request()->request_time() : base::Time(); 168} 169 170bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const { 171 return job_->is_cached_content_; 172} 173 174bool URLRequestHttpJob::HttpFilterContext::IsDownload() const { 175 return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0; 176} 177 178void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() { 179 DCHECK(job_->sdch_dictionary_advertised_); 180 job_->sdch_dictionary_advertised_ = false; 181} 182 183bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const { 184 return job_->sdch_dictionary_advertised_; 185} 186 187int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const { 188 return job_->filter_input_byte_count(); 189} 190 191int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const { 192 return job_->GetResponseCode(); 193} 194 195void URLRequestHttpJob::HttpFilterContext::RecordPacketStats( 196 StatisticSelector statistic) const { 197 job_->RecordPacketStats(statistic); 198} 199 200// TODO(darin): make sure the port blocking code is not lost 201// static 202URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request, 203 NetworkDelegate* network_delegate, 204 const std::string& scheme) { 205 DCHECK(scheme == "http" || scheme == "https"); 206 207 if (!request->context()->http_transaction_factory()) { 208 NOTREACHED() << "requires a valid context"; 209 return new URLRequestErrorJob( 210 request, network_delegate, ERR_INVALID_ARGUMENT); 211 } 212 213 GURL redirect_url; 214 if (request->GetHSTSRedirect(&redirect_url)) { 215 return new URLRequestRedirectJob( 216 request, network_delegate, redirect_url, 217 // Use status code 307 to preserve the method, so POST requests work. 
218 URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT); 219 } 220 return new URLRequestHttpJob(request, 221 network_delegate, 222 request->context()->http_user_agent_settings()); 223} 224 225 226URLRequestHttpJob::URLRequestHttpJob( 227 URLRequest* request, 228 NetworkDelegate* network_delegate, 229 const HttpUserAgentSettings* http_user_agent_settings) 230 : URLRequestJob(request, network_delegate), 231 priority_(DEFAULT_PRIORITY), 232 response_info_(NULL), 233 response_cookies_save_index_(0), 234 proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH), 235 server_auth_state_(AUTH_STATE_DONT_NEED_AUTH), 236 start_callback_(base::Bind( 237 &URLRequestHttpJob::OnStartCompleted, base::Unretained(this))), 238 notify_before_headers_sent_callback_(base::Bind( 239 &URLRequestHttpJob::NotifyBeforeSendHeadersCallback, 240 base::Unretained(this))), 241 read_in_progress_(false), 242 transaction_(NULL), 243 throttling_entry_(NULL), 244 sdch_dictionary_advertised_(false), 245 sdch_test_activated_(false), 246 sdch_test_control_(false), 247 is_cached_content_(false), 248 request_creation_time_(), 249 packet_timing_enabled_(false), 250 done_(false), 251 bytes_observed_in_packets_(0), 252 request_time_snapshot_(), 253 final_packet_time_(), 254 filter_context_(new HttpFilterContext(this)), 255 weak_factory_(this), 256 on_headers_received_callback_(base::Bind( 257 &URLRequestHttpJob::OnHeadersReceivedCallback, 258 base::Unretained(this))), 259 awaiting_callback_(false), 260 http_transaction_delegate_(new HttpTransactionDelegateImpl( 261 request, network_delegate)), 262 http_user_agent_settings_(http_user_agent_settings) { 263 URLRequestThrottlerManager* manager = request->context()->throttler_manager(); 264 if (manager) 265 throttling_entry_ = manager->RegisterRequestUrl(request->url()); 266 267 ResetTimer(); 268} 269 270URLRequestHttpJob::~URLRequestHttpJob() { 271 CHECK(!awaiting_callback_); 272 273 DCHECK(!sdch_test_control_ || !sdch_test_activated_); 274 if (!is_cached_content_) { 275 
if (sdch_test_control_) 276 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK); 277 if (sdch_test_activated_) 278 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE); 279 } 280 // Make sure SDCH filters are told to emit histogram data while 281 // filter_context_ is still alive. 282 DestroyFilters(); 283 284 if (sdch_dictionary_url_.is_valid()) { 285 // Prior to reaching the destructor, request_ has been set to a NULL 286 // pointer, so request_->url() is no longer valid in the destructor, and we 287 // use an alternate copy |request_info_.url|. 288 SdchManager* manager = SdchManager::Global(); 289 // To be extra safe, since this is a "different time" from when we decided 290 // to get the dictionary, we'll validate that an SdchManager is available. 291 // At shutdown time, care is taken to be sure that we don't delete this 292 // globally useful instance "too soon," so this check is just defensive 293 // coding to assure that IF the system is shutting down, we don't have any 294 // problem if the manager was deleted ahead of time. 295 if (manager) // Defensive programming. 296 manager->FetchDictionary(request_info_.url, sdch_dictionary_url_); 297 } 298 DoneWithRequest(ABORTED); 299} 300 301void URLRequestHttpJob::SetPriority(RequestPriority priority) { 302 priority_ = priority; 303 if (transaction_) 304 transaction_->SetPriority(priority_); 305} 306 307void URLRequestHttpJob::Start() { 308 DCHECK(!transaction_.get()); 309 310 // URLRequest::SetReferrer ensures that we do not send username and password 311 // fields in the referrer. 312 GURL referrer(request_->referrer()); 313 314 request_info_.url = request_->url(); 315 request_info_.method = request_->method(); 316 request_info_.load_flags = request_->load_flags(); 317 request_info_.request_id = request_->identifier(); 318 319 // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins 320 // from overriding headers that are controlled using other means. 
Otherwise a 321 // plugin could set a referrer although sending the referrer is inhibited. 322 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer); 323 324 // Our consumer should have made sure that this is a safe referrer. See for 325 // instance WebCore::FrameLoader::HideReferrer. 326 if (referrer.is_valid()) { 327 request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer, 328 referrer.spec()); 329 } 330 331 request_info_.extra_headers.SetHeaderIfMissing( 332 HttpRequestHeaders::kUserAgent, 333 http_user_agent_settings_ ? 334 http_user_agent_settings_->GetUserAgent(request_->url()) : 335 EmptyString()); 336 337 AddExtraHeaders(); 338 AddCookieHeaderAndStart(); 339} 340 341void URLRequestHttpJob::Kill() { 342 http_transaction_delegate_->OnDetachRequest(); 343 344 if (!transaction_.get()) 345 return; 346 347 weak_factory_.InvalidateWeakPtrs(); 348 DestroyTransaction(); 349 URLRequestJob::Kill(); 350} 351 352void URLRequestHttpJob::NotifyHeadersComplete() { 353 DCHECK(!response_info_); 354 355 response_info_ = transaction_->GetResponseInfo(); 356 357 // Save boolean, as we'll need this info at destruction time, and filters may 358 // also need this info. 359 is_cached_content_ = response_info_->was_cached; 360 361 if (!is_cached_content_ && throttling_entry_) { 362 URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders()); 363 throttling_entry_->UpdateWithResponse(request_info_.url.host(), 364 &response_adapter); 365 } 366 367 // The ordering of these calls is not important. 368 ProcessStrictTransportSecurityHeader(); 369 ProcessPublicKeyPinsHeader(); 370 371 if (SdchManager::Global() && 372 SdchManager::Global()->IsInSupportedDomain(request_->url())) { 373 const std::string name = "Get-Dictionary"; 374 std::string url_text; 375 void* iter = NULL; 376 // TODO(jar): We need to not fetch dictionaries the first time they are 377 // seen, but rather wait until we can justify their usefulness. 
378 // For now, we will only fetch the first dictionary, which will at least 379 // require multiple suggestions before we get additional ones for this site. 380 // Eventually we should wait until a dictionary is requested several times 381 // before we even download it (so that we don't waste memory or bandwidth). 382 if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) { 383 // request_->url() won't be valid in the destructor, so we use an 384 // alternate copy. 385 DCHECK_EQ(request_->url(), request_info_.url); 386 // Resolve suggested URL relative to request url. 387 sdch_dictionary_url_ = request_info_.url.Resolve(url_text); 388 } 389 } 390 391 // The HTTP transaction may be restarted several times for the purposes 392 // of sending authorization information. Each time it restarts, we get 393 // notified of the headers completion so that we can update the cookie store. 394 if (transaction_->IsReadyToRestartForAuth()) { 395 DCHECK(!response_info_->auth_challenge.get()); 396 // TODO(battre): This breaks the webrequest API for 397 // URLRequestTestHTTP.BasicAuthWithCookies 398 // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders 399 // occurs. 400 RestartTransactionWithAuth(AuthCredentials()); 401 return; 402 } 403 404 URLRequestJob::NotifyHeadersComplete(); 405} 406 407void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) { 408 DoneWithRequest(FINISHED); 409 URLRequestJob::NotifyDone(status); 410} 411 412void URLRequestHttpJob::DestroyTransaction() { 413 DCHECK(transaction_.get()); 414 415 DoneWithRequest(ABORTED); 416 transaction_.reset(); 417 response_info_ = NULL; 418} 419 420void URLRequestHttpJob::StartTransaction() { 421 if (network_delegate()) { 422 int rv = network_delegate()->NotifyBeforeSendHeaders( 423 request_, notify_before_headers_sent_callback_, 424 &request_info_.extra_headers); 425 // If an extension blocks the request, we rely on the callback to 426 // MaybeStartTransactionInternal(). 
427 if (rv == ERR_IO_PENDING) { 428 SetBlockedOnDelegate(); 429 return; 430 } 431 MaybeStartTransactionInternal(rv); 432 return; 433 } 434 StartTransactionInternal(); 435} 436 437void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) { 438 SetUnblockedOnDelegate(); 439 440 // Check that there are no callbacks to already canceled requests. 441 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); 442 443 MaybeStartTransactionInternal(result); 444} 445 446void URLRequestHttpJob::MaybeStartTransactionInternal(int result) { 447 if (result == OK) { 448 StartTransactionInternal(); 449 } else { 450 std::string source("delegate"); 451 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 452 NetLog::StringCallback("source", &source)); 453 NotifyCanceled(); 454 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 455 } 456} 457 458void URLRequestHttpJob::StartTransactionInternal() { 459 // NOTE: This method assumes that request_info_ is already setup properly. 460 461 // If we already have a transaction, then we should restart the transaction 462 // with auth provided by auth_credentials_. 463 464 int rv; 465 466 if (network_delegate()) { 467 network_delegate()->NotifySendHeaders( 468 request_, request_info_.extra_headers); 469 } 470 471 if (transaction_.get()) { 472 rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_); 473 auth_credentials_ = AuthCredentials(); 474 } else { 475 DCHECK(request_->context()->http_transaction_factory()); 476 477 rv = request_->context()->http_transaction_factory()->CreateTransaction( 478 priority_, &transaction_, http_transaction_delegate_.get()); 479 if (rv == OK) { 480 if (!throttling_entry_ || 481 !throttling_entry_->ShouldRejectRequest(*request_)) { 482 rv = transaction_->Start( 483 &request_info_, start_callback_, request_->net_log()); 484 start_time_ = base::TimeTicks::Now(); 485 } else { 486 // Special error code for the exponential back-off module. 
487 rv = ERR_TEMPORARILY_THROTTLED; 488 } 489 } 490 } 491 492 if (rv == ERR_IO_PENDING) 493 return; 494 495 // The transaction started synchronously, but we need to notify the 496 // URLRequest delegate via the message loop. 497 MessageLoop::current()->PostTask( 498 FROM_HERE, 499 base::Bind(&URLRequestHttpJob::OnStartCompleted, 500 weak_factory_.GetWeakPtr(), rv)); 501} 502 503void URLRequestHttpJob::AddExtraHeaders() { 504 // Supply Accept-Encoding field only if it is not already provided. 505 // It should be provided IF the content is known to have restrictions on 506 // potential encoding, such as streaming multi-media. 507 // For details see bug 47381. 508 // TODO(jar, enal): jpeg files etc. should set up a request header if 509 // possible. Right now it is done only by buffered_resource_loader and 510 // simple_data_source. 511 if (!request_info_.extra_headers.HasHeader( 512 HttpRequestHeaders::kAcceptEncoding)) { 513 bool advertise_sdch = SdchManager::Global() && 514 SdchManager::Global()->IsInSupportedDomain(request_->url()); 515 std::string avail_dictionaries; 516 if (advertise_sdch) { 517 SdchManager::Global()->GetAvailDictionaryList(request_->url(), 518 &avail_dictionaries); 519 520 // The AllowLatencyExperiment() is only true if we've successfully done a 521 // full SDCH compression recently in this browser session for this host. 522 // Note that for this path, there might be no applicable dictionaries, 523 // and hence we can't participate in the experiment. 524 if (!avail_dictionaries.empty() && 525 SdchManager::Global()->AllowLatencyExperiment(request_->url())) { 526 // We are participating in the test (or control), and hence we'll 527 // eventually record statistics via either SDCH_EXPERIMENT_DECODE or 528 // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. 529 packet_timing_enabled_ = true; 530 if (base::RandDouble() < .01) { 531 sdch_test_control_ = true; // 1% probability. 
532 advertise_sdch = false; 533 } else { 534 sdch_test_activated_ = true; 535 } 536 } 537 } 538 539 // Supply Accept-Encoding headers first so that it is more likely that they 540 // will be in the first transmitted packet. This can sometimes make it 541 // easier to filter and analyze the streams to assure that a proxy has not 542 // damaged these headers. Some proxies deliberately corrupt Accept-Encoding 543 // headers. 544 if (!advertise_sdch) { 545 // Tell the server what compression formats we support (other than SDCH). 546 request_info_.extra_headers.SetHeader( 547 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate"); 548 } else { 549 // Include SDCH in acceptable list. 550 request_info_.extra_headers.SetHeader( 551 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch"); 552 if (!avail_dictionaries.empty()) { 553 request_info_.extra_headers.SetHeader( 554 kAvailDictionaryHeader, 555 avail_dictionaries); 556 sdch_dictionary_advertised_ = true; 557 // Since we're tagging this transaction as advertising a dictionary, 558 // we'll definitely employ an SDCH filter (or tentative sdch filter) 559 // when we get a response. When done, we'll record histograms via 560 // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet 561 // arrival times. 562 packet_timing_enabled_ = true; 563 } 564 } 565 } 566 567 if (http_user_agent_settings_) { 568 // Only add default Accept-Language if the request didn't have it 569 // specified. 570 std::string accept_language = 571 http_user_agent_settings_->GetAcceptLanguage(); 572 if (!accept_language.empty()) { 573 request_info_.extra_headers.SetHeaderIfMissing( 574 HttpRequestHeaders::kAcceptLanguage, 575 accept_language); 576 } 577 } 578} 579 580void URLRequestHttpJob::AddCookieHeaderAndStart() { 581 // No matter what, we want to report our status as IO pending since we will 582 // be notifying our consumer asynchronously via OnStartCompleted. 
583 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 584 585 // If the request was destroyed, then there is no more work to do. 586 if (!request_) 587 return; 588 589 CookieStore* cookie_store = request_->context()->cookie_store(); 590 if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) { 591 net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster(); 592 if (cookie_monster) { 593 cookie_monster->GetAllCookiesForURLAsync( 594 request_->url(), 595 base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad, 596 weak_factory_.GetWeakPtr())); 597 } else { 598 CheckCookiePolicyAndLoad(CookieList()); 599 } 600 } else { 601 DoStartTransaction(); 602 } 603} 604 605void URLRequestHttpJob::DoLoadCookies() { 606 CookieOptions options; 607 options.set_include_httponly(); 608 request_->context()->cookie_store()->GetCookiesWithOptionsAsync( 609 request_->url(), options, 610 base::Bind(&URLRequestHttpJob::OnCookiesLoaded, 611 weak_factory_.GetWeakPtr())); 612} 613 614void URLRequestHttpJob::CheckCookiePolicyAndLoad( 615 const CookieList& cookie_list) { 616 if (CanGetCookies(cookie_list)) 617 DoLoadCookies(); 618 else 619 DoStartTransaction(); 620} 621 622void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) { 623 if (!cookie_line.empty()) { 624 request_info_.extra_headers.SetHeader( 625 HttpRequestHeaders::kCookie, cookie_line); 626 } 627 DoStartTransaction(); 628} 629 630void URLRequestHttpJob::DoStartTransaction() { 631 // We may have been canceled while retrieving cookies. 
632 if (GetStatus().is_success()) { 633 StartTransaction(); 634 } else { 635 NotifyCanceled(); 636 } 637} 638 639void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) { 640 if (result != net::OK) { 641 std::string source("delegate"); 642 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 643 NetLog::StringCallback("source", &source)); 644 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 645 return; 646 } 647 648 DCHECK(transaction_.get()); 649 650 const HttpResponseInfo* response_info = transaction_->GetResponseInfo(); 651 DCHECK(response_info); 652 653 response_cookies_.clear(); 654 response_cookies_save_index_ = 0; 655 656 FetchResponseCookies(&response_cookies_); 657 658 if (!GetResponseHeaders()->GetDateValue(&response_date_)) 659 response_date_ = base::Time(); 660 661 // Now, loop over the response cookies, and attempt to persist each. 662 SaveNextCookie(); 663} 664 665// If the save occurs synchronously, SaveNextCookie will loop and save the next 666// cookie. If the save is deferred, the callback is responsible for continuing 667// to iterate through the cookies. 668// TODO(erikwright): Modify the CookieStore API to indicate via return value 669// whether it completed synchronously or asynchronously. 670// See http://crbug.com/131066. 671void URLRequestHttpJob::SaveNextCookie() { 672 // No matter what, we want to report our status as IO pending since we will 673 // be notifying our consumer asynchronously via OnStartCompleted. 674 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 675 676 // Used to communicate with the callback. See the implementation of 677 // OnCookieSaved. 
678 scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false); 679 scoped_refptr<SharedBoolean> save_next_cookie_running = 680 new SharedBoolean(true); 681 682 if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) && 683 request_->context()->cookie_store() && 684 response_cookies_.size() > 0) { 685 CookieOptions options; 686 options.set_include_httponly(); 687 options.set_server_time(response_date_); 688 689 net::CookieStore::SetCookiesCallback callback( 690 base::Bind(&URLRequestHttpJob::OnCookieSaved, 691 weak_factory_.GetWeakPtr(), 692 save_next_cookie_running, 693 callback_pending)); 694 695 // Loop through the cookies as long as SetCookieWithOptionsAsync completes 696 // synchronously. 697 while (!callback_pending->data && 698 response_cookies_save_index_ < response_cookies_.size()) { 699 if (CanSetCookie( 700 response_cookies_[response_cookies_save_index_], &options)) { 701 callback_pending->data = true; 702 request_->context()->cookie_store()->SetCookieWithOptionsAsync( 703 request_->url(), response_cookies_[response_cookies_save_index_], 704 options, callback); 705 } 706 ++response_cookies_save_index_; 707 } 708 } 709 710 save_next_cookie_running->data = false; 711 712 if (!callback_pending->data) { 713 response_cookies_.clear(); 714 response_cookies_save_index_ = 0; 715 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status 716 NotifyHeadersComplete(); 717 return; 718 } 719} 720 721// |save_next_cookie_running| is true when the callback is bound and set to 722// false when SaveNextCookie exits, allowing the callback to determine if the 723// save occurred synchronously or asynchronously. 724// |callback_pending| is false when the callback is invoked and will be set to 725// true by the callback, allowing SaveNextCookie to detect whether the save 726// occurred synchronously. 727// See SaveNextCookie() for more information. 
728void URLRequestHttpJob::OnCookieSaved( 729 scoped_refptr<SharedBoolean> save_next_cookie_running, 730 scoped_refptr<SharedBoolean> callback_pending, 731 bool cookie_status) { 732 callback_pending->data = false; 733 734 // If we were called synchronously, return. 735 if (save_next_cookie_running->data) { 736 return; 737 } 738 739 // We were called asynchronously, so trigger the next save. 740 // We may have been canceled within OnSetCookie. 741 if (GetStatus().is_success()) { 742 SaveNextCookie(); 743 } else { 744 NotifyCanceled(); 745 } 746} 747 748void URLRequestHttpJob::FetchResponseCookies( 749 std::vector<std::string>* cookies) { 750 const std::string name = "Set-Cookie"; 751 std::string value; 752 753 void* iter = NULL; 754 HttpResponseHeaders* headers = GetResponseHeaders(); 755 while (headers->EnumerateHeader(&iter, name, &value)) { 756 if (!value.empty()) 757 cookies->push_back(value); 758 } 759} 760 761// NOTE: |ProcessStrictTransportSecurityHeader| and 762// |ProcessPublicKeyPinsHeader| have very similar structures, by design. 763void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { 764 DCHECK(response_info_); 765 TransportSecurityState* security_state = 766 request_->context()->transport_security_state(); 767 const SSLInfo& ssl_info = response_info_->ssl_info; 768 769 // Only accept HSTS headers on HTTPS connections that have no 770 // certificate errors. 771 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 772 !security_state) 773 return; 774 775 // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec: 776 // 777 // If a UA receives more than one STS header field in a HTTP response 778 // message over secure transport, then the UA MUST process only the 779 // first such header field. 
780 HttpResponseHeaders* headers = GetResponseHeaders(); 781 std::string value; 782 if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value)) 783 security_state->AddHSTSHeader(request_info_.url.host(), value); 784} 785 786void URLRequestHttpJob::ProcessPublicKeyPinsHeader() { 787 DCHECK(response_info_); 788 TransportSecurityState* security_state = 789 request_->context()->transport_security_state(); 790 const SSLInfo& ssl_info = response_info_->ssl_info; 791 792 // Only accept HPKP headers on HTTPS connections that have no 793 // certificate errors. 794 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 795 !security_state) 796 return; 797 798 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning: 799 // 800 // If a UA receives more than one PKP header field in an HTTP 801 // response message over secure transport, then the UA MUST process 802 // only the first such header field. 803 HttpResponseHeaders* headers = GetResponseHeaders(); 804 std::string value; 805 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value)) 806 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info); 807} 808 809void URLRequestHttpJob::OnStartCompleted(int result) { 810 RecordTimer(); 811 812 // If the request was destroyed, then there is no more work to do. 813 if (!request_) 814 return; 815 816 // If the transaction was destroyed, then the job was cancelled, and 817 // we can just ignore this notification. 
  // NOTE(review): this fragment continues URLRequestHttpJob::OnStartCompleted
  // — the function signature appears earlier in the file; |result| is the
  // completion code of the (re)started HTTP transaction.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  // A pinned-key failure means a certificate chain that violates a pin was
  // presented; report it (with the SSLInfo we captured) so it can be studied.
  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    // Give the NetworkDelegate a chance to observe/override the response
    // headers before cookies are saved and the caller is notified.
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      int error = network_delegate()->NotifyHeadersReceived(
          request_, on_headers_received_callback_,
          headers, &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          // Delegate will answer asynchronously via
          // OnHeadersReceivedCallback().
          awaiting_callback_ = true;
          SetBlockedOnDelegate();
        } else {
          // Delegate cancelled the request; log the cancellation source.
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.

    // The error is fatal (non-overridable) if the host has a
    // TransportSecurityState entry that says SSL errors must be fatal.
    TransportSecurityState::DomainState domain_state;
    const URLRequestContext* context = request_->context();
    const bool fatal = context->transport_security_state() &&
        context->transport_security_state()->GetDomainState(
            request_info_.url.host(),
            SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
            &domain_state) &&
        domain_state.ShouldSSLErrorsBeFatal();
    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    // Server asked for a client certificate; surface the request to the
    // consumer, who may later call ContinueWithCertificate().
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

// Completion callback for NetworkDelegate::NotifyHeadersReceived() when the
// delegate answered ERR_IO_PENDING above; resumes header processing with the
// delegate's final |result|.
void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  SetUnblockedOnDelegate();
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

// Completion callback for transaction_->Read(): translates |result|
// (bytes read, 0 = EOF, negative = net error) into URLRequestStatus and
// notifies the request that the read finished.
void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  // Treat an exact content-length mismatch as EOF (see
  // ShouldFixMismatchedContentLength for rationale).
  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

// Re-issues the transaction with |credentials| after a 401/407 challenge.
// Clears per-response state; it is repopulated in OnStartCompleted.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

// Sets the request body. Must be called before the transaction starts
// (enforced by the DCHECK). Ownership semantics of |upload| are defined by
// the caller/header — not visible in this chunk.
void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

// Replaces the extra request headers wholesale. Must be called before the
// transaction starts.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

// Reports the transaction's load state, or LOAD_STATE_IDLE before start /
// after cancellation.
LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

// Upload progress passthrough; a default (zero) UploadProgress when no
// transaction exists.
UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

// Extracts the MIME type from the response headers. Returns false until a
// response has been received.
bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

// Extracts the charset from the response headers. Returns false until a
// response has been received.
bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

// Copies the current response info into |info|. If the NetworkDelegate
// overrode the headers, the override takes precedence over the transaction's
// original headers.
void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_) {
    *info = *response_info_;
    if (override_response_headers_)
      info->headers = override_response_headers_;
  }
}

// Timing passthrough; leaves |load_timing_info| untouched when there is no
// transaction.
void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  if (transaction_)
    transaction_->GetLoadTimingInfo(load_timing_info);
}

// Re-extracts the Set-Cookie values from the response headers into |cookies|.
// Returns false until a response has been received.
bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

// HTTP status code of the response, or -1 if no response is available yet.
int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

// Builds the content-decoding filter chain (gzip/deflate/SDCH/...) from the
// response's Content-Encoding headers. Returns NULL when no decoding is
// needed. Caller takes ownership of the returned Filter (Factory contract —
// declared outside this chunk).
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  // Collect every Content-Encoding value, mapped to a FilterType.
  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

// Decides whether a redirect target is acceptable. Unhandled (external)
// schemes are deliberately allowed — the consumer deals with those; handled
// schemes are restricted to the http/https/ftp whitelist below.
bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols. This does not
  // restrict redirects to externally handled protocols. Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

// Returns true when the response is a 401/407 auth challenge that has not
// already been cancelled by the user. Also transitions the matching auth
// state machine to NEED_AUTH as a side effect.
bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:  // Proxy Authentication Required.
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:  // Unauthorized (server auth).
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

// Surfaces the pending auth challenge (from the 401/407 response) to the
// caller. Only valid while one of the auth state machines is in NEED_AUTH.
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}

// Supplies credentials for the pending challenge and restarts the
// transaction. Resolves the proxy challenge before the server challenge.
void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

// Declines the pending auth challenge: marks the state CANCELED so NeedsAuth
// returns false, then delivers the 401/407 body to the consumer as a normal
// response.
void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

// Resumes a transaction that stopped for a client-certificate request
// (ERR_SSL_CLIENT_AUTH_CERT_NEEDED). |client_cert| may be the chosen cert;
// NULL semantics are defined by RestartWithCertificate (outside this chunk).
void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Resumes after the consumer chose to override a (non-fatal) SSL error.
void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Returns true when a content-length/chunked-encoding error should be
// swallowed because the decoded byte count exactly matches the advertised
// Content-Length (a known pattern of misbehaving servers).
bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec we want to
  // support it (as IE and FireFox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

// Reads up to |buf_size| raw (pre-filter) response bytes into |buf|.
// Returns true with *bytes_read set on synchronous success (0 == EOF);
// returns false with IO_PENDING status when the read completes later via
// OnReadCompleted, or false with a FAILED status on error.
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  // base::Unretained is used for the async callback; lifetime is managed
  // elsewhere (transaction_ is owned by this job).
  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // Convert a benign content-length mismatch into EOF (rv = 0).
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

// Stops writing this response to the HTTP cache (passthrough).
void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

// Signals that the consumer will read no more data; finalizes bookkeeping.
void URLRequestHttpJob::DoneReading() {
  if (transaction_.get())
    transaction_->DoneReading();
  DoneWithRequest(FINISHED);
}

// Remote endpoint of the response, or a default HostPortPair before any
// response has been received.
HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ? response_info_->socket_address : HostPortPair();
}

// Records time-to-first-byte (request creation -> now) into UMA, then clears
// the creation timestamp so the same transaction cannot be recorded twice.
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
}

// Re-arms the time-to-first-byte timer; the previous measurement must have
// been recorded (timestamp null) or this is a programming error.
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

// Tracks arrival of raw (pre-filter) bytes: remembers the time of the most
// recent data and snapshots the request time when the first bytes arrive.
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}

// Emits SDCH-related UMA histograms (byte counts / durations) selected by
// |statistic|. No-op unless packet timing was enabled and data was observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

// Records pre-/post-filter byte counts to measure how often compressible
// http(s) responses actually arrived compressed, bucketed by SSL / proxy /
// direct (see the grouping comment below).
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content
      !GetStatus().is_success() ||   // Don't record failed content
      !IsCompressibleContent() ||    // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

// "Compressible" for the histograms above: JavaScript or other supported
// non-image MIME types.
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

// Records total job duration into UMA, split by success/cancel and by
// cached/network. Clears start_time_ so the job is recorded at most once.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  start_time_ = base::TimeTicks();
}

// Final bookkeeping for the job; idempotent via |done_|. On normal completion
// also records the response content length and compression stats.
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}

// Response headers accessor: the NetworkDelegate's override (if any) wins
// over the transaction's original headers.
HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_ :
      transaction_->GetResponseInfo()->headers;
}

// The owning URLRequest is going away; drop the expectation of a pending
// delegate callback (see the lifetime note in OnStartCompleted).
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

// Forwards request-detach notification to the transaction delegate.
void URLRequestHttpJob::OnDetachRequest() {
  http_transaction_delegate_->OnDetachRequest();
}

}  // namespace net