// url_request_http_job.cc revision 6e8cce623b6e4fe0c9e4af605d675dd9d0338c38
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_version_info.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/cert/cert_status_flags.h"
#include "net/cookies/cookie_store.h"
#include "net/http/http_content_disposition.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/proxy/proxy_info.h"
#include "net/ssl/ssl_cert_request_info.h"
#include "net/ssl/ssl_config_service.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"
#include "net/websockets/websocket_handshake_stream_base.h"

// Request header used to advertise the SDCH dictionaries the client already
// holds for this origin (see AddExtraHeaders below).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

// Adapter that exposes the owning URLRequestHttpJob's state to the content
// decoding filter machinery through the FilterContext interface. All calls
// simply forward to the job or read its fields.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual bool GetContentDisposition(std::string* disposition) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual const URLRequestContext* GetURLRequestContext() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  // Back-pointer to the owning job; never null (DCHECKed in the constructor).
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

// Returns false (and leaves |gurl| untouched) once the job has been detached
// from its URLRequest.
bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

// Reports only the first Content-Disposition response header, if any.
bool URLRequestHttpJob::HttpFilterContext::GetContentDisposition(
    std::string* disposition) const {
  HttpResponseHeaders* headers = job_->GetResponseHeaders();
  void *iter = NULL;
  return headers->EnumerateHeader(&iter, "Content-Disposition", disposition);
}

// Falls back to a null base::Time when the request is gone.
base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ?
job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

// Clears the "SDCH advertised" bit; only legal to call when it was set
// (guarded by the DCHECK).
void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

const URLRequestContext*
URLRequestHttpJob::HttpFilterContext::GetURLRequestContext() const {
  return job_->request() ? job_->request()->context() : NULL;
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// TODO(darin): make sure the port blocking code is not lost
// Creates the job for an http/https/ws/wss request. Returns an error job
// when the context has no transaction factory, and a redirect job when HSTS
// upgrades the URL; otherwise a regular URLRequestHttpJob.
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" ||
         scheme == "wss");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT, "HSTS");
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      // base::Unretained is safe for these bound callbacks only because
      // Kill()/DestroyTransaction() tear the transaction down before |this|
      // dies; the weak_factory_ is used for the posted-task paths instead.
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_user_agent_settings_(http_user_agent_settings),
      weak_factory_(this) {
  // Register with the throttler (exponential back-off) if the context has one.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  // A job can be in the SDCH experiment's control group or activated group,
  // never both; cached responses are excluded from the experiment stats.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  DoneWithRequest(ABORTED);
}

// Propagates a priority change to the in-flight transaction, if any.
void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  if (transaction_)
    transaction_->SetPriority(priority_);
}

// Prepares request_info_ (URL, method, load flags, privacy mode, referrer,
// User-Agent) and kicks off the cookie-load -> transaction-start sequence.
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      PRIVACY_MODE_ENABLED : PRIVACY_MODE_DISABLED;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent() : std::string());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

// Cancels the job: invalidates pending weak callbacks and tears down the
// transaction before delegating to the base class.
void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

// Gives the network delegate a chance to inspect/modify headers that are
// about to be sent to a proxy.
void URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback(
    const ProxyInfo& proxy_info,
    HttpRequestHeaders* request_headers) {
  DCHECK(request_headers);
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
  if (network_delegate()) {
    network_delegate()->NotifyBeforeSendProxyHeaders(
        request_,
        proxy_info,
        request_headers);
  }
}

// Invoked when response headers are available. Updates throttling state,
// processes HSTS/HPKP headers, may trigger an SDCH dictionary fetch, and
// handles transparent auth restarts before surfacing the headers upward.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  SdchManager* sdch_manager(request()->context()->sdch_manager());
  if (sdch_manager && sdch_manager->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // Resolve suggested URL relative to request url.
      GURL sdch_dictionary_url = request_->url().Resolve(url_text);
      if (sdch_dictionary_url.is_valid()) {
        sdch_manager->FetchDictionary(request_->url(), sdch_dictionary_url);
      }
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

// Releases the transaction and all per-response state derived from it.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}

// Asks the network delegate for permission to send the request headers; the
// delegate may answer synchronously or later via the bound callback.
void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

// Continues the start sequence after the delegate's verdict: OK proceeds,
// anything else cancels the request with a net-log entry.
void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    // NOTE(review): both NotifyCanceled() and NotifyStartError() fire here;
    // confirm the double notification is intended.
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    // Auth restart path: reuse the existing transaction, then clear the
    // credentials so they are not reused accidentally.
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_);

    // WebSocket URLs are only allowed when the caller attached a handshake
    // stream helper via user data; otherwise reject the scheme outright.
    if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
      base::SupportsUserData::Data* data = request_->GetUserData(
          WebSocketHandshakeStreamBase::CreateHelper::DataKey());
      if (data) {
        transaction_->SetWebSocketHandshakeStreamCreateHelper(
            static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
      } else {
        rv = ERR_DISALLOWED_URL_SCHEME;
      }
    }

    if (rv == OK) {
      transaction_->SetBeforeNetworkStartCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart,
                     base::Unretained(this)));
      transaction_->SetBeforeProxyHeadersSentCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback,
                     base::Unretained(this)));

      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Adds Accept-Encoding (possibly advertising SDCH with the available
// dictionary list) and a default Accept-Language to the request headers.
void URLRequestHttpJob::AddExtraHeaders() {
  SdchManager* sdch_manager = request()->context()->sdch_manager();

  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = sdch_manager &&
        // We don't support SDCH responses to POST as there is a possibility
        // of having SDCH encoded responses returned (e.g. by the cache)
        // which we cannot decode, and in those situations, we will need
        // to retransmit the request without SDCH, which is illegal for a POST.
        request()->method() != "POST" &&
        sdch_manager->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      sdch_manager->GetAvailDictionaryList(request_->url(),
                                           &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          sdch_manager->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

// Starts the cookie-loading phase: asynchronously fetches cookies for the
// URL (unless sending cookies is disabled), then starts the transaction.
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = GetCookieStore();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    cookie_store->GetAllCookiesForURLAsync(
        request_->url(),
        base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                   weak_factory_.GetWeakPtr()));
  } else {
    DoStartTransaction();
  }
}

// Fetches the serialized cookie line (including HttpOnly cookies) for the
// request URL; continues in OnCookiesLoaded.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  GetCookieStore()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

// Loads cookies only when policy (CanGetCookies) allows it; otherwise starts
// the transaction without a Cookie header.
void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
    // Disable privacy mode as we are sending cookies anyway.
    request_info_.privacy_mode = PRIVACY_MODE_DISABLED;
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

// After headers arrive (and any delegate header callback finished), extracts
// Set-Cookie values and begins persisting them one at a time.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // The server's Date header is used to adjust cookie expiry for clock skew;
  // fall back to a null time when absent or unparseable.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      GetCookieStore() && response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        GetCookieStore()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // All cookies handled synchronously (or none to save): finish up now.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  // |cookie_status| (whether the store accepted the cookie) is ignored here.
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

// Collects every non-empty Set-Cookie response header value into |cookies|.
void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}

void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HPKP headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
  //
  //   If a UA receives more than one PKP header field in an HTTP
  //   response message over secure transport, then the UA MUST process
  //   only the first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
}

// Completion callback for transaction start/restart. Routes the result to
// header processing, SSL error handling, client-cert requests, or failure
// notification.
void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the job is done (due to cancellation), can just ignore this
  // notification.
  if (done_)
    return;

  receive_headers_end_ = base::TimeTicks::Now();

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  // Report pin-validation failures to the fraudulent certificate reporter,
  // when the context has one configured.
  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    if (transaction_ && transaction_->GetResponseInfo()) {
      SetProxyServer(transaction_->GetResponseInfo()->proxy_server);
    }
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      OnCallToDelegate();
      allowed_unsafe_redirect_url_ = GURL();
      int error = network_delegate()->NotifyHeadersReceived(
          request_,
          on_headers_received_callback_,
          headers.get(),
          &override_response_headers_,
          &allowed_unsafe_redirect_url_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          OnCallToDelegateComplete();
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error.
    if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY ||
        result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) {
      // These are hard failures. They're handled separately and don't have
      // the correct cert status, so set it here.
      SSLInfo info(transaction_->GetResponseInfo()->ssl_info);
      info.cert_status = MapNetErrorToCertStatus(result);
      NotifySSLCertificateError(info, true);
    } else {
      // Maybe overridable, maybe not. Ask the delegate to decide.
      const URLRequestContext* context = request_->context();
      TransportSecurityState* state = context->transport_security_state();
      const bool fatal =
          state &&
          state->ShouldSSLErrorsBeFatal(
              request_info_.url.host(),
              SSLConfigService::IsSNIAvailable(context->ssl_config_service()));
      NotifySSLCertificateError(
          transaction_->GetResponseInfo()->ssl_info, fatal);
    }
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info.get());
  } else {
    // Even on an error, there may be useful information in the response
    // info (e.g. whether there's a cached copy).
    if (transaction_.get())
      response_info_ = transaction_->GetResponseInfo();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

// Continuation of NotifyHeadersReceived when the delegate answered
// asynchronously (ERR_IO_PENDING above).
void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
887 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); 888 889 SaveCookiesAndNotifyHeadersComplete(result); 890} 891 892void URLRequestHttpJob::OnReadCompleted(int result) { 893 read_in_progress_ = false; 894 895 if (ShouldFixMismatchedContentLength(result)) 896 result = OK; 897 898 if (result == OK) { 899 NotifyDone(URLRequestStatus()); 900 } else if (result < 0) { 901 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); 902 } else { 903 // Clear the IO_PENDING status 904 SetStatus(URLRequestStatus()); 905 } 906 907 NotifyReadComplete(result); 908} 909 910void URLRequestHttpJob::RestartTransactionWithAuth( 911 const AuthCredentials& credentials) { 912 auth_credentials_ = credentials; 913 914 // These will be reset in OnStartCompleted. 915 response_info_ = NULL; 916 receive_headers_end_ = base::TimeTicks(); 917 response_cookies_.clear(); 918 919 ResetTimer(); 920 921 // Update the cookies, since the cookie store may have been updated from the 922 // headers in the 401/407. Since cookies were already appended to 923 // extra_headers, we need to strip them out before adding them again. 924 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie); 925 926 AddCookieHeaderAndStart(); 927} 928 929void URLRequestHttpJob::SetUpload(UploadDataStream* upload) { 930 DCHECK(!transaction_.get()) << "cannot change once started"; 931 request_info_.upload_data_stream = upload; 932} 933 934void URLRequestHttpJob::SetExtraRequestHeaders( 935 const HttpRequestHeaders& headers) { 936 DCHECK(!transaction_.get()) << "cannot change once started"; 937 request_info_.extra_headers.CopyFrom(headers); 938} 939 940LoadState URLRequestHttpJob::GetLoadState() const { 941 return transaction_.get() ? 942 transaction_->GetLoadState() : LOAD_STATE_IDLE; 943} 944 945UploadProgress URLRequestHttpJob::GetUploadProgress() const { 946 return transaction_.get() ? 
947 transaction_->GetUploadProgress() : UploadProgress(); 948} 949 950bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { 951 DCHECK(transaction_.get()); 952 953 if (!response_info_) 954 return false; 955 956 return GetResponseHeaders()->GetMimeType(mime_type); 957} 958 959bool URLRequestHttpJob::GetCharset(std::string* charset) { 960 DCHECK(transaction_.get()); 961 962 if (!response_info_) 963 return false; 964 965 return GetResponseHeaders()->GetCharset(charset); 966} 967 968void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { 969 DCHECK(request_); 970 971 if (response_info_) { 972 DCHECK(transaction_.get()); 973 974 *info = *response_info_; 975 if (override_response_headers_.get()) 976 info->headers = override_response_headers_; 977 } 978} 979 980void URLRequestHttpJob::GetLoadTimingInfo( 981 LoadTimingInfo* load_timing_info) const { 982 // If haven't made it far enough to receive any headers, don't return 983 // anything. This makes for more consistent behavior in the case of errors. 984 if (!transaction_ || receive_headers_end_.is_null()) 985 return; 986 if (transaction_->GetLoadTimingInfo(load_timing_info)) 987 load_timing_info->receive_headers_end = receive_headers_end_; 988} 989 990bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) { 991 DCHECK(transaction_.get()); 992 993 if (!response_info_) 994 return false; 995 996 // TODO(darin): Why are we extracting response cookies again? Perhaps we 997 // should just leverage response_cookies_. 
998 999 cookies->clear(); 1000 FetchResponseCookies(cookies); 1001 return true; 1002} 1003 1004int URLRequestHttpJob::GetResponseCode() const { 1005 DCHECK(transaction_.get()); 1006 1007 if (!response_info_) 1008 return -1; 1009 1010 return GetResponseHeaders()->response_code(); 1011} 1012 1013Filter* URLRequestHttpJob::SetupFilter() const { 1014 DCHECK(transaction_.get()); 1015 if (!response_info_) 1016 return NULL; 1017 1018 std::vector<Filter::FilterType> encoding_types; 1019 std::string encoding_type; 1020 HttpResponseHeaders* headers = GetResponseHeaders(); 1021 void* iter = NULL; 1022 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) { 1023 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); 1024 } 1025 1026 if (filter_context_->IsSdchResponse()) { 1027 // We are wary of proxies that discard or damage SDCH encoding. If a server 1028 // explicitly states that this is not SDCH content, then we can correct our 1029 // assumption that this is an SDCH response, and avoid the need to recover 1030 // as though the content is corrupted (when we discover it is not SDCH 1031 // encoded). 1032 std::string sdch_response_status; 1033 iter = NULL; 1034 while (headers->EnumerateHeader(&iter, "X-Sdch-Encode", 1035 &sdch_response_status)) { 1036 if (sdch_response_status == "0") { 1037 filter_context_->ResetSdchResponseToFalse(); 1038 break; 1039 } 1040 } 1041 } 1042 1043 // Even if encoding types are empty, there is a chance that we need to add 1044 // some decoding, as some proxies strip encoding completely. In such cases, 1045 // we may need to add (for example) SDCH filtering (when the context suggests 1046 // it is appropriate). 1047 Filter::FixupEncodingTypes(*filter_context_, &encoding_types); 1048 1049 return !encoding_types.empty() 1050 ? 
Filter::Factory(encoding_types, *filter_context_) : NULL; 1051} 1052 1053bool URLRequestHttpJob::CopyFragmentOnRedirect(const GURL& location) const { 1054 // Allow modification of reference fragments by default, unless 1055 // |allowed_unsafe_redirect_url_| is set and equal to the redirect URL. 1056 // When this is the case, we assume that the network delegate has set the 1057 // desired redirect URL (with or without fragment), so it must not be changed 1058 // any more. 1059 return !allowed_unsafe_redirect_url_.is_valid() || 1060 allowed_unsafe_redirect_url_ != location; 1061} 1062 1063bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { 1064 // HTTP is always safe. 1065 // TODO(pauljensen): Remove once crbug.com/146591 is fixed. 1066 if (location.is_valid() && 1067 (location.scheme() == "http" || location.scheme() == "https")) { 1068 return true; 1069 } 1070 // Delegates may mark a URL as safe for redirection. 1071 if (allowed_unsafe_redirect_url_.is_valid() && 1072 allowed_unsafe_redirect_url_ == location) { 1073 return true; 1074 } 1075 // Query URLRequestJobFactory as to whether |location| would be safe to 1076 // redirect to. 1077 return request_->context()->job_factory() && 1078 request_->context()->job_factory()->IsSafeRedirectTarget(location); 1079} 1080 1081bool URLRequestHttpJob::NeedsAuth() { 1082 int code = GetResponseCode(); 1083 if (code == -1) 1084 return false; 1085 1086 // Check if we need either Proxy or WWW Authentication. This could happen 1087 // because we either provided no auth info, or provided incorrect info. 
1088 switch (code) { 1089 case 407: 1090 if (proxy_auth_state_ == AUTH_STATE_CANCELED) 1091 return false; 1092 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; 1093 return true; 1094 case 401: 1095 if (server_auth_state_ == AUTH_STATE_CANCELED) 1096 return false; 1097 server_auth_state_ = AUTH_STATE_NEED_AUTH; 1098 return true; 1099 } 1100 return false; 1101} 1102 1103void URLRequestHttpJob::GetAuthChallengeInfo( 1104 scoped_refptr<AuthChallengeInfo>* result) { 1105 DCHECK(transaction_.get()); 1106 DCHECK(response_info_); 1107 1108 // sanity checks: 1109 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || 1110 server_auth_state_ == AUTH_STATE_NEED_AUTH); 1111 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) || 1112 (GetResponseHeaders()->response_code() == 1113 HTTP_PROXY_AUTHENTICATION_REQUIRED)); 1114 1115 *result = response_info_->auth_challenge; 1116} 1117 1118void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) { 1119 DCHECK(transaction_.get()); 1120 1121 // Proxy gets set first, then WWW. 1122 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1123 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; 1124 } else { 1125 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1126 server_auth_state_ = AUTH_STATE_HAVE_AUTH; 1127 } 1128 1129 RestartTransactionWithAuth(credentials); 1130} 1131 1132void URLRequestHttpJob::CancelAuth() { 1133 // Proxy gets set first, then WWW. 1134 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1135 proxy_auth_state_ = AUTH_STATE_CANCELED; 1136 } else { 1137 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1138 server_auth_state_ = AUTH_STATE_CANCELED; 1139 } 1140 1141 // These will be reset in OnStartCompleted. 1142 response_info_ = NULL; 1143 receive_headers_end_ = base::TimeTicks::Now(); 1144 response_cookies_.clear(); 1145 1146 ResetTimer(); 1147 1148 // OK, let the consumer read the error page... 
1149 // 1150 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, 1151 // which will cause the consumer to receive OnResponseStarted instead of 1152 // OnAuthRequired. 1153 // 1154 // We have to do this via InvokeLater to avoid "recursing" the consumer. 1155 // 1156 base::MessageLoop::current()->PostTask( 1157 FROM_HERE, 1158 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1159 weak_factory_.GetWeakPtr(), OK)); 1160} 1161 1162void URLRequestHttpJob::ContinueWithCertificate( 1163 X509Certificate* client_cert) { 1164 DCHECK(transaction_.get()); 1165 1166 DCHECK(!response_info_) << "should not have a response yet"; 1167 receive_headers_end_ = base::TimeTicks(); 1168 1169 ResetTimer(); 1170 1171 // No matter what, we want to report our status as IO pending since we will 1172 // be notifying our consumer asynchronously via OnStartCompleted. 1173 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1174 1175 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_); 1176 if (rv == ERR_IO_PENDING) 1177 return; 1178 1179 // The transaction started synchronously, but we need to notify the 1180 // URLRequest delegate via the message loop. 1181 base::MessageLoop::current()->PostTask( 1182 FROM_HERE, 1183 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1184 weak_factory_.GetWeakPtr(), rv)); 1185} 1186 1187void URLRequestHttpJob::ContinueDespiteLastError() { 1188 // If the transaction was destroyed, then the job was cancelled. 1189 if (!transaction_.get()) 1190 return; 1191 1192 DCHECK(!response_info_) << "should not have a response yet"; 1193 receive_headers_end_ = base::TimeTicks(); 1194 1195 ResetTimer(); 1196 1197 // No matter what, we want to report our status as IO pending since we will 1198 // be notifying our consumer asynchronously via OnStartCompleted. 
1199 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1200 1201 int rv = transaction_->RestartIgnoringLastError(start_callback_); 1202 if (rv == ERR_IO_PENDING) 1203 return; 1204 1205 // The transaction started synchronously, but we need to notify the 1206 // URLRequest delegate via the message loop. 1207 base::MessageLoop::current()->PostTask( 1208 FROM_HERE, 1209 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1210 weak_factory_.GetWeakPtr(), rv)); 1211} 1212 1213void URLRequestHttpJob::ResumeNetworkStart() { 1214 DCHECK(transaction_.get()); 1215 transaction_->ResumeNetworkStart(); 1216} 1217 1218bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { 1219 // Some servers send the body compressed, but specify the content length as 1220 // the uncompressed size. Although this violates the HTTP spec we want to 1221 // support it (as IE and FireFox do), but *only* for an exact match. 1222 // See http://crbug.com/79694. 1223 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH || 1224 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) { 1225 if (request_ && request_->response_headers()) { 1226 int64 expected_length = request_->response_headers()->GetContentLength(); 1227 VLOG(1) << __FUNCTION__ << "() " 1228 << "\"" << request_->url().spec() << "\"" 1229 << " content-length = " << expected_length 1230 << " pre total = " << prefilter_bytes_read() 1231 << " post total = " << postfilter_bytes_read(); 1232 if (postfilter_bytes_read() == expected_length) { 1233 // Clear the error. 
1234 return true; 1235 } 1236 } 1237 } 1238 return false; 1239} 1240 1241bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, 1242 int* bytes_read) { 1243 DCHECK_NE(buf_size, 0); 1244 DCHECK(bytes_read); 1245 DCHECK(!read_in_progress_); 1246 1247 int rv = transaction_->Read( 1248 buf, buf_size, 1249 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this))); 1250 1251 if (ShouldFixMismatchedContentLength(rv)) 1252 rv = 0; 1253 1254 if (rv >= 0) { 1255 *bytes_read = rv; 1256 if (!rv) 1257 DoneWithRequest(FINISHED); 1258 return true; 1259 } 1260 1261 if (rv == ERR_IO_PENDING) { 1262 read_in_progress_ = true; 1263 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1264 } else { 1265 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 1266 } 1267 1268 return false; 1269} 1270 1271void URLRequestHttpJob::StopCaching() { 1272 if (transaction_.get()) 1273 transaction_->StopCaching(); 1274} 1275 1276bool URLRequestHttpJob::GetFullRequestHeaders( 1277 HttpRequestHeaders* headers) const { 1278 if (!transaction_) 1279 return false; 1280 1281 return transaction_->GetFullRequestHeaders(headers); 1282} 1283 1284int64 URLRequestHttpJob::GetTotalReceivedBytes() const { 1285 if (!transaction_) 1286 return 0; 1287 1288 return transaction_->GetTotalReceivedBytes(); 1289} 1290 1291void URLRequestHttpJob::DoneReading() { 1292 if (transaction_) { 1293 transaction_->DoneReading(); 1294 } 1295 DoneWithRequest(FINISHED); 1296} 1297 1298void URLRequestHttpJob::DoneReadingRedirectResponse() { 1299 if (transaction_) { 1300 if (transaction_->GetResponseInfo()->headers->IsRedirect(NULL)) { 1301 // If the original headers indicate a redirect, go ahead and cache the 1302 // response, even if the |override_response_headers_| are a redirect to 1303 // another location. 1304 transaction_->DoneReading(); 1305 } else { 1306 // Otherwise, |override_response_headers_| must be non-NULL and contain 1307 // bogus headers indicating a redirect. 
1308 DCHECK(override_response_headers_); 1309 DCHECK(override_response_headers_->IsRedirect(NULL)); 1310 transaction_->StopCaching(); 1311 } 1312 } 1313 DoneWithRequest(FINISHED); 1314} 1315 1316HostPortPair URLRequestHttpJob::GetSocketAddress() const { 1317 return response_info_ ? response_info_->socket_address : HostPortPair(); 1318} 1319 1320void URLRequestHttpJob::RecordTimer() { 1321 if (request_creation_time_.is_null()) { 1322 NOTREACHED() 1323 << "The same transaction shouldn't start twice without new timing."; 1324 return; 1325 } 1326 1327 base::TimeDelta to_start = base::Time::Now() - request_creation_time_; 1328 request_creation_time_ = base::Time(); 1329 1330 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); 1331} 1332 1333void URLRequestHttpJob::ResetTimer() { 1334 if (!request_creation_time_.is_null()) { 1335 NOTREACHED() 1336 << "The timer was reset before it was recorded."; 1337 return; 1338 } 1339 request_creation_time_ = base::Time::Now(); 1340} 1341 1342void URLRequestHttpJob::UpdatePacketReadTimes() { 1343 if (!packet_timing_enabled_) 1344 return; 1345 1346 if (filter_input_byte_count() <= bytes_observed_in_packets_) { 1347 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); 1348 return; // No new bytes have arrived. 
1349 } 1350 1351 base::Time now(base::Time::Now()); 1352 if (!bytes_observed_in_packets_) 1353 request_time_snapshot_ = now; 1354 final_packet_time_ = now; 1355 1356 bytes_observed_in_packets_ = filter_input_byte_count(); 1357} 1358 1359void URLRequestHttpJob::RecordPacketStats( 1360 FilterContext::StatisticSelector statistic) const { 1361 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) 1362 return; 1363 1364 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; 1365 switch (statistic) { 1366 case FilterContext::SDCH_DECODE: { 1367 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", 1368 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); 1369 return; 1370 } 1371 case FilterContext::SDCH_PASSTHROUGH: { 1372 // Despite advertising a dictionary, we handled non-sdch compressed 1373 // content. 1374 return; 1375 } 1376 1377 case FilterContext::SDCH_EXPERIMENT_DECODE: { 1378 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment3_Decode", 1379 duration, 1380 base::TimeDelta::FromMilliseconds(20), 1381 base::TimeDelta::FromMinutes(10), 100); 1382 return; 1383 } 1384 case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { 1385 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment3_Holdback", 1386 duration, 1387 base::TimeDelta::FromMilliseconds(20), 1388 base::TimeDelta::FromMinutes(10), 100); 1389 return; 1390 } 1391 default: 1392 NOTREACHED(); 1393 return; 1394 } 1395} 1396 1397// The common type of histogram we use for all compression-tracking histograms. 1398#define COMPRESSION_HISTOGRAM(name, sample) \ 1399 do { \ 1400 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." 
name, sample, \ 1401 500, 1000000, 100); \ 1402 } while (0) 1403 1404void URLRequestHttpJob::RecordCompressionHistograms() { 1405 DCHECK(request_); 1406 if (!request_) 1407 return; 1408 1409 if (is_cached_content_ || // Don't record cached content 1410 !GetStatus().is_success() || // Don't record failed content 1411 !IsCompressibleContent() || // Only record compressible content 1412 !prefilter_bytes_read()) // Zero-byte responses aren't useful. 1413 return; 1414 1415 // Miniature requests aren't really compressible. Don't count them. 1416 const int kMinSize = 16; 1417 if (prefilter_bytes_read() < kMinSize) 1418 return; 1419 1420 // Only record for http or https urls. 1421 bool is_http = request_->url().SchemeIs("http"); 1422 bool is_https = request_->url().SchemeIs("https"); 1423 if (!is_http && !is_https) 1424 return; 1425 1426 int compressed_B = prefilter_bytes_read(); 1427 int decompressed_B = postfilter_bytes_read(); 1428 bool was_filtered = HasFilter(); 1429 1430 // We want to record how often downloaded resources are compressed. 1431 // But, we recognize that different protocols may have different 1432 // properties. So, for each request, we'll put it into one of 3 1433 // groups: 1434 // a) SSL resources 1435 // Proxies cannot tamper with compression headers with SSL. 1436 // b) Non-SSL, loaded-via-proxy resources 1437 // In this case, we know a proxy might have interfered. 1438 // c) Non-SSL, loaded-without-proxy resources 1439 // In this case, we know there was no explicit proxy. However, 1440 // it is possible that a transparent proxy was still interfering. 1441 // 1442 // For each group, we record the same 3 histograms. 
1443 1444 if (is_https) { 1445 if (was_filtered) { 1446 COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); 1447 COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); 1448 } else { 1449 COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); 1450 } 1451 return; 1452 } 1453 1454 if (request_->was_fetched_via_proxy()) { 1455 if (was_filtered) { 1456 COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); 1457 COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); 1458 } else { 1459 COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); 1460 } 1461 return; 1462 } 1463 1464 if (was_filtered) { 1465 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); 1466 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); 1467 } else { 1468 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); 1469 } 1470} 1471 1472bool URLRequestHttpJob::IsCompressibleContent() const { 1473 std::string mime_type; 1474 return GetMimeType(&mime_type) && 1475 (IsSupportedJavascriptMimeType(mime_type.c_str()) || 1476 IsSupportedNonImageMimeType(mime_type.c_str())); 1477} 1478 1479void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) { 1480 if (start_time_.is_null()) 1481 return; 1482 1483 base::TimeDelta total_time = base::TimeTicks::Now() - start_time_; 1484 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time); 1485 1486 if (reason == FINISHED) { 1487 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time); 1488 } else { 1489 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time); 1490 } 1491 1492 if (response_info_) { 1493 if (response_info_->was_cached) { 1494 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time); 1495 } else { 1496 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time); 1497 } 1498 } 1499 1500 if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached()) 1501 
UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork", 1502 prefilter_bytes_read()); 1503 1504 start_time_ = base::TimeTicks(); 1505} 1506 1507void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { 1508 if (done_) 1509 return; 1510 done_ = true; 1511 RecordPerfHistograms(reason); 1512 if (reason == FINISHED) { 1513 request_->set_received_response_content_length(prefilter_bytes_read()); 1514 RecordCompressionHistograms(); 1515 } 1516} 1517 1518HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const { 1519 DCHECK(transaction_.get()); 1520 DCHECK(transaction_->GetResponseInfo()); 1521 return override_response_headers_.get() ? 1522 override_response_headers_.get() : 1523 transaction_->GetResponseInfo()->headers.get(); 1524} 1525 1526void URLRequestHttpJob::NotifyURLRequestDestroyed() { 1527 awaiting_callback_ = false; 1528} 1529 1530} // namespace net 1531