url_request_http_job.cc revision 5d1f7b1de12d16ceb2c938c56701a3e8bfa558f7
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_version_info.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/cert/cert_status_flags.h"
#include "net/cookies/cookie_monster.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/ssl/ssl_cert_request_info.h"
#include "net/ssl/ssl_config_service.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"
#include "net/websockets/websocket_handshake_stream_base.h"

static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset the filter context for a response that should
  // have been SDCH encoded when there is an update due to an explicit HTTP
  // header.
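  // (See SetupFilter(): a response carrying "X-Sdch-Encode: 0" triggers this
  // reset.)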
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" ||
         scheme == "wss");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      weak_factory_(this),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_user_agent_settings_(http_user_agent_settings) {
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}

void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  if (transaction_)
    transaction_->SetPriority(priority_);
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not to send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      kPrivacyModeEnabled : kPrivacyModeDisabled;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent(request_->url()) :
          std::string());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this
    // site. Eventually we should wait until a dictionary is requested several
    // times before we even download it (so that we don't waste memory or
    // bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve the suggested URL relative to the request URL.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information.
  // Each time it restarts, we get notified of the headers completion, so that
  // we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}

void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already set up properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_);

    if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
      // TODO(ricea): Implement WebSocket throttling semantics as defined in
      // RFC6455 Section 4.1.
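      // A WebSocket job can only proceed if the consumer attached a
      // WebSocketHandshakeStreamBase::CreateHelper to the request as user
      // data; otherwise the request is failed below with
      // ERR_DISALLOWED_URL_SCHEME.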
      base::SupportsUserData::Data* data = request_->GetUserData(
          WebSocketHandshakeStreamBase::CreateHelper::DataKey());
      if (data) {
        transaction_->SetWebSocketHandshakeStreamCreateHelper(
            static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
      } else {
        rv = ERR_DISALLOWED_URL_SCHEME;
      }
    }

    if (rv == OK) {
      transaction_->SetBeforeNetworkStartCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart,
                     base::Unretained(this)));

      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // Supply the Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
          HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in the acceptable list.
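      // (The Avail-Dictionary header added below tells the server, by hash,
      // which downloaded SDCH dictionaries the client already holds.)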
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add the default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      CheckCookiePolicyAndLoad(CookieList());
    }
  } else {
    DoStartTransaction();
  }
}

void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
    // Disable privacy mode as we are sending cookies anyway.
    request_info_.privacy_mode = kPrivacyModeDisabled;
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
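  // |result| is the network delegate's verdict from NotifyHeadersReceived():
  // net::OK to proceed, or a network error code to abort the request.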
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
              response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status.
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true while SaveNextCookie is on the stack and
// set to false when it exits, allowing the callback to determine whether the
// save occurred synchronously or asynchronously.
// |callback_pending| is set to true by SaveNextCookie before each save and
// reset to false by the callback, allowing SaveNextCookie to detect whether
// the save completed synchronously.
// See SaveNextCookie() for more information.
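// (The booleans are refcounted because the cookie store may run the callback
// after SaveNextCookie()'s stack frame has unwound; the two sides must share
// ownership of the flags.)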
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}

void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HPKP headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
  //
  //   If a UA receives more than one PKP header field in an HTTP
  //   response message over secure transport, then the UA MUST process
  //   only the first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
}

void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  receive_headers_end_ = base::TimeTicks::Now();

  // Clear the IO_PENDING status.
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      OnCallToDelegate();
      int error = network_delegate()->NotifyHeadersReceived(
          request_,
          on_headers_received_callback_,
          headers.get(),
          &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          OnCallToDelegateComplete();
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error.
    if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY ||
        result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) {
      // These are hard failures. They're handled separately and don't have
      // the correct cert status, so set it here.
      SSLInfo info(transaction_->GetResponseInfo()->ssl_info);
      info.cert_status = MapNetErrorToCertStatus(result);
      NotifySSLCertificateError(info, true);
    } else {
      // Maybe overridable, maybe not. Ask the delegate to decide.
      TransportSecurityState::DomainState domain_state;
      const URLRequestContext* context = request_->context();
      const bool fatal = context->transport_security_state() &&
          context->transport_security_state()->GetDomainState(
              request_info_.url.host(),
              SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
              &domain_state) &&
          domain_state.ShouldSSLErrorsBeFatal();
      NotifySSLCertificateError(
          transaction_->GetResponseInfo()->ssl_info, fatal);
    }
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info.get());
  } else {
    // Even on an error, there may be useful information in the response
    // info (e.g. whether there's a cached copy).
    if (transaction_.get())
      response_info_ = transaction_->GetResponseInfo();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status.
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);

  if (response_info_) {
    DCHECK(transaction_.get());

    *info = *response_info_;
    if (override_response_headers_.get())
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  // If we haven't made it far enough to receive any headers, don't return
  // anything. This makes for more consistent behavior in the case of errors.
  if (!transaction_ || receive_headers_end_.is_null())
    return;
  if (transaction_->GetLoadTimingInfo(load_timing_info))
    load_timing_info->receive_headers_end = receive_headers_end_;
}

bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // HTTP is always safe.
  // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
  if (location.is_valid() &&
      (location.scheme() == "http" || location.scheme() == "https")) {
    return true;
  }
  // Query URLRequestJobFactory as to whether |location| would be safe to
  // redirect to.
  return request_->context()->job_factory() &&
         request_->context()->job_factory()->IsSafeRedirectTarget(location);
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
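  // 401 carries a WWW-Authenticate challenge from the origin server; 407
  // carries a Proxy-Authenticate challenge from an intermediate proxy.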
  switch (code) {
    case 407:
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // Sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks::Now();
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
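  // This is reached when the consumer elects to proceed past an overridable
  // error (e.g. an SSL certificate error reported via
  // NotifySSLCertificateError); the transaction is restarted with the last
  // error ignored.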
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::ResumeNetworkStart() {
  DCHECK(transaction_.get());
  transaction_->ResumeNetworkStart();
}

bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec, we want to
  // support it (as IE and Firefox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

bool URLRequestHttpJob::GetFullRequestHeaders(
    HttpRequestHeaders* headers) const {
  if (!transaction_)
    return false;

  return transaction_->GetFullRequestHeaders(headers);
}

int64 URLRequestHttpJob::GetTotalReceivedBytes() const {
  if (!transaction_)
    return 0;

  return transaction_->GetTotalReceivedBytes();
}

void URLRequestHttpJob::DoneReading() {
  if (transaction_.get())
    transaction_->DoneReading();
  DoneWithRequest(FINISHED);
}

HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ?
      response_info_->socket_address : HostPortPair();
}

void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
}

void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}

void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||         // Don't record cached content.
      !GetStatus().is_success() ||  // Don't record failed content.
      !IsCompressibleContent() ||   // Only record compressible content.
      !prefilter_bytes_read())      // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
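  // Note: prefilter_bytes_read() counts bytes as received from the network
  // (possibly still compressed), while postfilter_bytes_read() counts bytes
  // after any decoding filter has run; both are recorded below.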
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
    UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
                         prefilter_bytes_read());

  start_time_ = base::TimeTicks();
}

void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}

HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_.get() :
      transaction_->GetResponseInfo()->headers.get();
}

void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

}  // namespace net