// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_version_info.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/cert/cert_status_flags.h"
#include "net/cookies/cookie_store.h"
#include "net/http/http_content_disposition.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/proxy/proxy_info.h"
#include "net/ssl/ssl_cert_request_info.h"
#include "net/ssl/ssl_config_service.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"
#include "net/websockets/websocket_handshake_stream_base.h"

// Request header used to advertise the SDCH dictionaries available on the
// client (see AddExtraHeaders, which sets it alongside Accept-Encoding).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

// Adapter that exposes URLRequestHttpJob state to the content-decoding
// filter machinery via the FilterContext interface. Every accessor simply
// delegates to the owning job (|job_|), which outlives this object — it is
// created in the job's constructor and the job destroys its filters before
// dying (see ~URLRequestHttpJob).
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual bool GetContentDisposition(std::string* disposition) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool SdchResponseExpected() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual const URLRequestContext* GetURLRequestContext() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  // Non-owning back-pointer to the job this context describes. Never NULL
  // (DCHECKed in the constructor).
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

// Returns false if the underlying URLRequest has already been detached from
// the job; otherwise copies its URL into |gurl|.
bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

// Reports the first Content-Disposition response header, if any.
bool URLRequestHttpJob::HttpFilterContext::GetContentDisposition(
    std::string* disposition) const {
  HttpResponseHeaders* headers = job_->GetResponseHeaders();
  void *iter = NULL;
  return headers->EnumerateHeader(&iter, "Content-Disposition", disposition);
}

// Returns a null base::Time() when the request has gone away.
base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

// Only legal to call when the job actually advertised an SDCH dictionary;
// used to back out of SDCH decoding for this response.
void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::SdchResponseExpected() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

const URLRequestContext*
URLRequestHttpJob::HttpFilterContext::GetURLRequestContext() const {
  return job_->request() ? job_->request()->context() : NULL;
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// Entry point used by the job factory for http/https/ws/wss schemes.
// Returns an error job when the context has no transaction factory, an HSTS
// redirect job when the scheme must be upgraded, or a real HTTP job.
// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" ||
         scheme == "wss");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT, "HSTS");
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      // start_callback_ uses Unretained(this): the transaction it is handed
      // to is owned by this job, so the callback cannot outlive |this|.
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_user_agent_settings_(http_user_agent_settings),
      weak_factory_(this) {
  // Register with the context's throttler (exponential back-off), if one is
  // configured; throttling_entry_ stays NULL otherwise.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  // A delegate callback must never be outstanding at destruction; that would
  // mean NotifyHeadersReceived could run on a dead object.
  CHECK(!awaiting_callback_);

  // A request is in at most one arm of the SDCH latency experiment.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  DoneWithRequest(ABORTED);
}

// Records the new priority and propagates it to the live transaction, if any.
void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  if (transaction_)
    transaction_->SetPriority(priority_);
}

// Builds request_info_ from the URLRequest (URL, method, load flags, privacy
// mode, Referer, User-Agent), then kicks off the cookie-load/start pipeline.
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      PRIVACY_MODE_ENABLED : PRIVACY_MODE_DISABLED;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent() : std::string());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

// Cancels the job: invalidates weak-pointer callbacks (cookie loads/saves,
// deferred OnStartCompleted) and tears down the transaction before deferring
// to the base class. No-op if the job never started a transaction.
void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

// Invoked by the transaction just before headers are sent to a proxy; gives
// the network delegate a chance to inspect/modify |request_headers|.
void URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback(
    const ProxyInfo& proxy_info,
    HttpRequestHeaders* request_headers) {
  DCHECK(request_headers);
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
  if (network_delegate()) {
    network_delegate()->NotifyBeforeSendProxyHeaders(
        request_,
        proxy_info,
        request_headers);
  }
}

// Runs once response headers are fully available (possibly after auth
// restarts): feeds the throttler, processes HSTS/HPKP headers, triggers SDCH
// dictionary fetches, and finally notifies the base class — unless the
// transaction wants to restart for auth first.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  SdchManager* sdch_manager(request()->context()->sdch_manager());
  if (sdch_manager && sdch_manager->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // Resolve suggested URL relative to request url.
      GURL sdch_dictionary_url = request_->url().Resolve(url_text);
      if (sdch_dictionary_url.is_valid()) {
        sdch_manager->FetchDictionary(request_->url(), sdch_dictionary_url);
      }
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

// Flushes final per-request stats before handing completion to the base
// class.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

// Drops the transaction and every piece of state derived from it.
// response_info_ points into the transaction, so it must be cleared here.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}

// Gives the network delegate a chance to block/modify the request before the
// transaction is created; may complete synchronously or via
// notify_before_headers_sent_callback_.
void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

// Async continuation of StartTransaction when the delegate deferred.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

// Proceeds with the transaction on OK, otherwise records the cancellation in
// the net log and fails the job.
void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    // NOTE(review): both NotifyCanceled() and NotifyStartError() are invoked
    // here, unlike the other delegate-error paths in this file which call only
    // NotifyStartError() — presumably intentional; confirm before changing.
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

// Creates (or restarts, for auth) the HttpTransaction and starts it, subject
// to throttling. Completion is delivered through start_callback_, or posted
// to the message loop when the transaction finished synchronously.
void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_);

    // ws:/wss: requests are only valid when a WebSocket handshake-stream
    // helper was attached to the URLRequest as user data.
    if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
      base::SupportsUserData::Data* data = request_->GetUserData(
          WebSocketHandshakeStreamBase::CreateHelper::DataKey());
      if (data) {
        transaction_->SetWebSocketHandshakeStreamCreateHelper(
            static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
      } else {
        rv = ERR_DISALLOWED_URL_SCHEME;
      }
    }

    if (rv == OK) {
      transaction_->SetBeforeNetworkStartCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart,
                     base::Unretained(this)));
      transaction_->SetBeforeProxyHeadersSentCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback,
                     base::Unretained(this)));

      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_,
                                                  network_delegate())) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Fills in Accept-Encoding (with SDCH advertisement and the 1% holdback
// experiment) and Accept-Language, without clobbering values the caller
// already set.
void URLRequestHttpJob::AddExtraHeaders() {
  SdchManager* sdch_manager = request()->context()->sdch_manager();

  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = sdch_manager &&
        // We don't support SDCH responses to POST as there is a possibility
        // of having SDCH encoded responses returned (e.g. by the cache)
        // which we cannot decode, and in those situations, we will need
        // to retransmit the request without SDCH, which is illegal for a POST.
        request()->method() != "POST" &&
        sdch_manager->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      sdch_manager->GetAvailDictionaryList(request_->url(),
                                           &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          sdch_manager->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip, deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip, deflate, sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

// Fetches cookies for the request URL (unless disallowed by load flags or a
// missing store) and then starts the transaction; the cookie path continues
// through CheckCookiePolicyAndLoad / OnCookiesLoaded.
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = GetCookieStore();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    cookie_store->GetAllCookiesForURLAsync(
        request_->url(),
        base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                   weak_factory_.GetWeakPtr()));
  } else {
    DoStartTransaction();
  }
}

// Asynchronously loads the cookie header string (including HttpOnly cookies)
// for the request URL; completion lands in OnCookiesLoaded.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  GetCookieStore()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

// Consults the delegate policy: load cookies if allowed, otherwise start the
// transaction without them.
void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

// Attaches the loaded cookie header (if any) and proceeds to start the
// transaction.
void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
    // Disable privacy mode as we are sending cookies anyway.
    request_info_.privacy_mode = PRIVACY_MODE_DISABLED;
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

// Post-headers step: either propagates a delegate error, or snapshots the
// response cookies / Date header and starts the (possibly async) cookie-save
// loop, which ends in NotifyHeadersComplete.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // Server Date header is used for cookie expiration clock-skew correction.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      GetCookieStore() && response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        GetCookieStore()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // All cookies handled synchronously: finish up and report headers now.
  // Otherwise OnCookieSaved resumes the loop when the async save completes.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
704void URLRequestHttpJob::OnCookieSaved( 705 scoped_refptr<SharedBoolean> save_next_cookie_running, 706 scoped_refptr<SharedBoolean> callback_pending, 707 bool cookie_status) { 708 callback_pending->data = false; 709 710 // If we were called synchronously, return. 711 if (save_next_cookie_running->data) { 712 return; 713 } 714 715 // We were called asynchronously, so trigger the next save. 716 // We may have been canceled within OnSetCookie. 717 if (GetStatus().is_success()) { 718 SaveNextCookie(); 719 } else { 720 NotifyCanceled(); 721 } 722} 723 724void URLRequestHttpJob::FetchResponseCookies( 725 std::vector<std::string>* cookies) { 726 const std::string name = "Set-Cookie"; 727 std::string value; 728 729 void* iter = NULL; 730 HttpResponseHeaders* headers = GetResponseHeaders(); 731 while (headers->EnumerateHeader(&iter, name, &value)) { 732 if (!value.empty()) 733 cookies->push_back(value); 734 } 735} 736 737// NOTE: |ProcessStrictTransportSecurityHeader| and 738// |ProcessPublicKeyPinsHeader| have very similar structures, by design. 739void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { 740 DCHECK(response_info_); 741 TransportSecurityState* security_state = 742 request_->context()->transport_security_state(); 743 const SSLInfo& ssl_info = response_info_->ssl_info; 744 745 // Only accept HSTS headers on HTTPS connections that have no 746 // certificate errors. 747 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 748 !security_state) 749 return; 750 751 // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec: 752 // 753 // If a UA receives more than one STS header field in a HTTP response 754 // message over secure transport, then the UA MUST process only the 755 // first such header field. 
756 HttpResponseHeaders* headers = GetResponseHeaders(); 757 std::string value; 758 if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value)) 759 security_state->AddHSTSHeader(request_info_.url.host(), value); 760} 761 762void URLRequestHttpJob::ProcessPublicKeyPinsHeader() { 763 DCHECK(response_info_); 764 TransportSecurityState* security_state = 765 request_->context()->transport_security_state(); 766 const SSLInfo& ssl_info = response_info_->ssl_info; 767 768 // Only accept HPKP headers on HTTPS connections that have no 769 // certificate errors. 770 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 771 !security_state) 772 return; 773 774 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning: 775 // 776 // If a UA receives more than one PKP header field in an HTTP 777 // response message over secure transport, then the UA MUST process 778 // only the first such header field. 779 HttpResponseHeaders* headers = GetResponseHeaders(); 780 std::string value; 781 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value)) 782 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info); 783} 784 785void URLRequestHttpJob::OnStartCompleted(int result) { 786 RecordTimer(); 787 788 // If the request was destroyed, then there is no more work to do. 789 if (!request_) 790 return; 791 792 // If the job is done (due to cancellation), can just ignore this 793 // notification. 
794 if (done_) 795 return; 796 797 receive_headers_end_ = base::TimeTicks::Now(); 798 799 // Clear the IO_PENDING status 800 SetStatus(URLRequestStatus()); 801 802 const URLRequestContext* context = request_->context(); 803 804 if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN && 805 transaction_->GetResponseInfo() != NULL) { 806 FraudulentCertificateReporter* reporter = 807 context->fraudulent_certificate_reporter(); 808 if (reporter != NULL) { 809 const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info; 810 const std::string& host = request_->url().host(); 811 812 reporter->SendReport(host, ssl_info); 813 } 814 } 815 816 if (result == OK) { 817 if (transaction_ && transaction_->GetResponseInfo()) { 818 SetProxyServer(transaction_->GetResponseInfo()->proxy_server); 819 } 820 scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders(); 821 if (network_delegate()) { 822 // Note that |this| may not be deleted until 823 // |on_headers_received_callback_| or 824 // |NetworkDelegate::URLRequestDestroyed()| has been called. 825 OnCallToDelegate(); 826 allowed_unsafe_redirect_url_ = GURL(); 827 int error = network_delegate()->NotifyHeadersReceived( 828 request_, 829 on_headers_received_callback_, 830 headers.get(), 831 &override_response_headers_, 832 &allowed_unsafe_redirect_url_); 833 if (error != net::OK) { 834 if (error == net::ERR_IO_PENDING) { 835 awaiting_callback_ = true; 836 } else { 837 std::string source("delegate"); 838 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 839 NetLog::StringCallback("source", 840 &source)); 841 OnCallToDelegateComplete(); 842 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error)); 843 } 844 return; 845 } 846 } 847 848 SaveCookiesAndNotifyHeadersComplete(net::OK); 849 } else if (IsCertificateError(result)) { 850 // We encountered an SSL certificate error. 851 if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY || 852 result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) { 853 // These are hard failures. 
They're handled separately and don't have 854 // the correct cert status, so set it here. 855 SSLInfo info(transaction_->GetResponseInfo()->ssl_info); 856 info.cert_status = MapNetErrorToCertStatus(result); 857 NotifySSLCertificateError(info, true); 858 } else { 859 // Maybe overridable, maybe not. Ask the delegate to decide. 860 const URLRequestContext* context = request_->context(); 861 TransportSecurityState* state = context->transport_security_state(); 862 const bool fatal = 863 state && state->ShouldSSLErrorsBeFatal(request_info_.url.host()); 864 NotifySSLCertificateError( 865 transaction_->GetResponseInfo()->ssl_info, fatal); 866 } 867 } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) { 868 NotifyCertificateRequested( 869 transaction_->GetResponseInfo()->cert_request_info.get()); 870 } else { 871 // Even on an error, there may be useful information in the response 872 // info (e.g. whether there's a cached copy). 873 if (transaction_.get()) 874 response_info_ = transaction_->GetResponseInfo(); 875 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 876 } 877} 878 879void URLRequestHttpJob::OnHeadersReceivedCallback(int result) { 880 awaiting_callback_ = false; 881 882 // Check that there are no callbacks to already canceled requests. 
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

// Completion callback for an asynchronous HttpTransaction::Read(). |result|
// is the byte count on success, 0 at end-of-stream, or a net error code.
void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  // Treat a tolerated content-length mismatch as a clean end-of-stream (see
  // ShouldFixMismatchedContentLength()).
  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    // End of the response body.
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

// Restarts the underlying transaction with |credentials| after a 401/407
// challenge.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

// Attaches the request body stream. Must be called before the transaction is
// created.
void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

// Copies caller-provided extra headers into the request. Must be called
// before the transaction is created.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

// Reports the transaction's load state, or IDLE if none has started.
LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

// Reports upload progress; default-constructed (empty) when no transaction.
UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

// Reports the response MIME type; false until headers have been received.
bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

// Reports the response charset; false until headers have been received.
bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

// Copies the response info into |info|, substituting any delegate-supplied
// override headers for the transaction's originals.
void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);

  if (response_info_) {
    DCHECK(transaction_.get());

    *info = *response_info_;
    if (override_response_headers_.get())
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  // If haven't made it far enough to receive any headers, don't return
  // anything. This makes for more consistent behavior in the case of errors.
  if (!transaction_ || receive_headers_end_.is_null())
    return;
  if (transaction_->GetLoadTimingInfo(load_timing_info))
    load_timing_info->receive_headers_end = receive_headers_end_;
}

// Extracts the response's cookie strings into |cookies|; false until headers
// have been received.
bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

// Returns the HTTP status code, or -1 if headers are not yet available.
int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

// Builds the content-decoding filter chain from the response's
// Content-Encoding headers (and SDCH expectations); NULL when no decoding is
// needed or headers are not yet available.
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  // Map every Content-Encoding header value onto a filter type.
  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->SdchResponseExpected()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ?
      Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::CopyFragmentOnRedirect(const GURL& location) const {
  // Allow modification of reference fragments by default, unless
  // |allowed_unsafe_redirect_url_| is set and equal to the redirect URL.
  // When this is the case, we assume that the network delegate has set the
  // desired redirect URL (with or without fragment), so it must not be changed
  // any more.
  return !allowed_unsafe_redirect_url_.is_valid() ||
      allowed_unsafe_redirect_url_ != location;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // HTTP is always safe.
  // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
  if (location.is_valid() &&
      (location.scheme() == "http" || location.scheme() == "https")) {
    return true;
  }
  // Delegates may mark a URL as safe for redirection.
  if (allowed_unsafe_redirect_url_.is_valid() &&
      allowed_unsafe_redirect_url_ == location) {
    return true;
  }
  // Query URLRequestJobFactory as to whether |location| would be safe to
  // redirect to.
  return request_->context()->job_factory() &&
      request_->context()->job_factory()->IsSafeRedirectTarget(location);
}

// Returns true if the response is a 401/407 auth challenge that has not
// already been canceled, moving the matching auth state to NEED_AUTH.
bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

// Surfaces the pending auth challenge so the consumer can prompt for
// credentials. Only meaningful while NeedsAuth() is true (see DCHECKs).
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}

// Supplies credentials for the pending challenge and restarts the
// transaction with them.
void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

// Declines the pending auth challenge; the consumer will then receive the
// 401/407 body as an ordinary response.
void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks::Now();
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

// Resumes the job with the client certificate chosen by the consumer after
// NotifyCertificateRequested().
void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Retries the request after the consumer chose to ignore the last
// (overridable) error, e.g. a non-fatal SSL certificate error.
void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Unblocks a transaction that was deferred before hitting the network.
void URLRequestHttpJob::ResumeNetworkStart() {
  DCHECK(transaction_.get());
  transaction_->ResumeNetworkStart();
}

// Returns true when |rv| is a content-length error that should be forgiven
// because the decoded byte count exactly matches the advertised length.
bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec we want to
  // support it (as IE and FireFox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

// Reads response body bytes into |buf|. Returns true with |*bytes_read| set
// on synchronous completion (0 means EOF); returns false with IO_PENDING
// status when the read will complete later via OnReadCompleted().
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // Map a tolerated content-length mismatch onto a clean EOF.
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

// Stops further writes of this response into the HTTP cache.
void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

bool URLRequestHttpJob::GetFullRequestHeaders(
    HttpRequestHeaders* headers) const {
  if (!transaction_)
    return false;

  return transaction_->GetFullRequestHeaders(headers);
}

int64 URLRequestHttpJob::GetTotalReceivedBytes() const {
  if (!transaction_)
    return 0;

  return transaction_->GetTotalReceivedBytes();
}

// The consumer has finished reading the (non-redirect) response body.
void URLRequestHttpJob::DoneReading() {
  if (transaction_) {
    transaction_->DoneReading();
  }
  DoneWithRequest(FINISHED);
}

// The consumer is following a redirect and will not read this body; decide
// whether the redirect response itself may still be cached.
void URLRequestHttpJob::DoneReadingRedirectResponse() {
  if (transaction_) {
    if (transaction_->GetResponseInfo()->headers->IsRedirect(NULL)) {
      // If the original headers indicate a redirect, go ahead and cache the
      // response, even if the |override_response_headers_| are a redirect to
      // another location.
      transaction_->DoneReading();
    } else {
      // Otherwise, |override_response_headers_| must be non-NULL and contain
      // bogus headers indicating a redirect.
      DCHECK(override_response_headers_.get());
      DCHECK(override_response_headers_->IsRedirect(NULL));
      transaction_->StopCaching();
    }
  }
  DoneWithRequest(FINISHED);
}

// Remote endpoint of the response; empty HostPortPair before headers arrive.
HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ? response_info_->socket_address : HostPortPair();
}

// Records the elapsed time since request creation into the time-to-first-byte
// histogram, then clears the creation time so the same transaction is not
// counted twice.
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
}

// Re-arms the time-to-first-byte timer for a (re)started transaction; expects
// the previous measurement to have been recorded already.
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

// Updates packet-arrival bookkeeping (first/last arrival times and total byte
// count) consumed by RecordPacketStats() below.
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  base::Time now(base::Time::Now());
  // Remember when the first bytes arrived so the durations recorded below
  // span the whole transfer.
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = now;
  final_packet_time_ = now;

  bytes_observed_in_packets_ = filter_input_byte_count();
}

// Records the SDCH-related UMA histogram selected by |statistic|; no-ops
// unless packet timing was enabled and at least one packet was observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment3_Decode",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment3_Holdback",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

// Records how often (and how effectively) downloaded resources were
// compressed, bucketed by transport (SSL / proxied / direct). Cached, failed,
// tiny, and non-compressible responses are skipped.
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content
      !GetStatus().is_success() ||   // Don't record failed content
      !IsCompressibleContent() ||    // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.
  // Group (a): SSL — compression headers cannot have been altered in transit.
  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  // Group (b): explicit proxy — a proxy may have stripped the encoding.
  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  // Group (c): direct fetch (a transparent proxy may still have interfered).
  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

// True when the response MIME type is one we'd expect a server to compress
// (javascript or other supported non-image types).
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

// Emits total-time UMA histograms for the job, split by completion cause and
// cache state, then clears |start_time_| so the stats are recorded only once.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  // For prefetches that actually hit the network, record the bytes fetched.
  if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
    UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
                         prefilter_bytes_read());

  start_time_ = base::TimeTicks();
}

// Finalizes the job exactly once: records histograms and, on success,
// publishes the received content length to the URLRequest.
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}

// Returns the effective response headers: the delegate's overrides when
// present, otherwise the transaction's own headers.
HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_.get() :
      transaction_->GetResponseInfo()->headers.get();
}

// The owning URLRequest is going away; drop the pending-delegate flag so we
// don't act on a delegate callback for a dead request.
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

}  // namespace net