url_request_http_job.cc revision 116680a4aac90f2aa7413d9095a592090648e557
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_version_info.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/cert/cert_status_flags.h"
#include "net/cookies/cookie_store.h"
#include "net/http/http_content_disposition.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/proxy/proxy_info.h"
#include "net/ssl/ssl_cert_request_info.h"
#include "net/ssl/ssl_config_service.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_job_factory.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"
#include "net/websockets/websocket_handshake_stream_base.h"

// Request header advertising the SDCH dictionaries available on the client
// (see AddExtraHeaders below).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

// Adapter that exposes the state of a URLRequestHttpJob to the content
// filter machinery (gzip/deflate/SDCH decoders) through the FilterContext
// interface. It holds a non-owning pointer back to the job; the job owns
// this object (see filter_context_), so |job_| outlives it.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual bool GetContentDisposition(std::string* disposition) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual const URLRequestContext* GetURLRequestContext() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;  // Non-owning back-pointer; job owns this context.

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

// Delegates MIME-type lookup to the job's response headers.
bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

// Returns the request URL, or false if the URLRequest has already gone away.
bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

// Reports the first Content-Disposition response header, if any.
bool URLRequestHttpJob::HttpFilterContext::GetContentDisposition(
    std::string* disposition) const {
  HttpResponseHeaders* headers = job_->GetResponseHeaders();
  void *iter = NULL;
  return headers->EnumerateHeader(&iter, "Content-Disposition", disposition);
}

// Returns the time the request was issued; a null base::Time if the
// URLRequest is gone.
base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

// Clears the "dictionary advertised" bit so filters stop treating the
// response as SDCH; only legal if it was previously set (see DCHECK).
void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

const URLRequestContext*
URLRequestHttpJob::HttpFilterContext::GetURLRequestContext() const {
  return job_->request() ? job_->request()->context() : NULL;
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// TODO(darin): make sure the port blocking code is not lost
// Creates the job for an http(s)/ws(s) request. Returns an error job when
// the context has no transaction factory, and a redirect job when HSTS
// upgrades the URL to https before any network activity.
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" ||
         scheme == "wss");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT, "HSTS");
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      // base::Unretained is safe for these callbacks: they are only handed
      // to |transaction_| / the network delegate, whose notifications are
      // cut off in Kill()/DestroyTransaction() before |this| dies.
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_user_agent_settings_(http_user_agent_settings),
      weak_factory_(this) {
  // Register with the exponential back-off throttler, if the context has one.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  // A job can be in the SDCH experiment's control group or activated group,
  // but never both. Only record experiment stats for network (non-cache)
  // responses.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  DoneWithRequest(ABORTED);
}

// Records the new priority and forwards it to the transaction if one is
// already running.
void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  if (transaction_)
    transaction_->SetPriority(priority_);
}

// Builds request_info_ from the URLRequest (URL, method, load flags,
// privacy mode, referrer, user agent), then kicks off the cookie-loading /
// transaction-starting sequence.
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      PRIVACY_MODE_ENABLED : PRIVACY_MODE_DISABLED;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent() : std::string());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

// Cancels the job: invalidates weak pointers so no pending async callback
// (cookie loads, delegate notifications) can fire on a dead transaction.
void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

// Lets the network delegate mutate headers destined for the proxy, right
// before they are sent.
void URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback(
    const ProxyInfo& proxy_info,
    HttpRequestHeaders* request_headers) {
  DCHECK(request_headers);
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
  if (network_delegate()) {
    network_delegate()->NotifyBeforeSendProxyHeaders(
        request_,
        proxy_info,
        request_headers);
  }
}

// Runs after response headers (and cookies) have been fully processed:
// updates the throttler, handles HSTS/HPKP headers, fetches an SDCH
// dictionary if one is advertised, and either restarts for auth or
// surfaces the headers to the URLRequest.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  SdchManager* sdch_manager(request()->context()->sdch_manager());
  if (sdch_manager && sdch_manager->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // Resolve suggested URL relative to request url.
      GURL sdch_dictionary_url = request_->url().Resolve(url_text);
      if (sdch_dictionary_url.is_valid()) {
        sdch_manager->FetchDictionary(request_->url(), sdch_dictionary_url);
      }
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

// Records end-of-request stats before forwarding the final status upward.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

// Tears down the transaction and any response state derived from it.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}

// Gives the network delegate a chance to block/modify the request before
// headers are sent; continues synchronously or via
// notify_before_headers_sent_callback_.
void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

// Async completion of NetworkDelegate::NotifyBeforeSendHeaders.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

// Starts the transaction if the delegate allowed it, otherwise reports the
// delegate's error.
void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    // NOTE(review): both NotifyCanceled() and NotifyStartError() are invoked
    // here, unlike the single NotifyStartError() in OnStartCompleted's
    // delegate-error path — confirm this double notification is intended.
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

// Creates (or restarts, for auth) the HttpTransaction and starts it, unless
// the throttler rejects the request. A synchronous result is re-posted to
// the message loop so the URLRequest delegate is always notified async.
void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_);

    if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
      // TODO(ricea): Implement WebSocket throttling semantics as defined in
      // RFC6455 Section 4.1.
      // ws(s) URLs are only permitted when the caller attached a handshake
      // stream helper via user data; otherwise the scheme is disallowed.
      base::SupportsUserData::Data* data = request_->GetUserData(
          WebSocketHandshakeStreamBase::CreateHelper::DataKey());
      if (data) {
        transaction_->SetWebSocketHandshakeStreamCreateHelper(
            static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
      } else {
        rv = ERR_DISALLOWED_URL_SCHEME;
      }
    }

    if (rv == OK) {
      transaction_->SetBeforeNetworkStartCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart,
                     base::Unretained(this)));
      transaction_->SetBeforeProxyHeadersSentCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback,
                     base::Unretained(this)));

      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Adds Accept-Encoding (including SDCH advertisement and the SDCH latency
// experiment sampling) and Accept-Language headers, without overriding any
// values the caller already set.
void URLRequestHttpJob::AddExtraHeaders() {
  SdchManager* sdch_manager = request()->context()->sdch_manager();

  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = sdch_manager &&
        // We don't support SDCH responses to POST as there is a possibility
        // of having SDCH encoded responses returned (e.g. by the cache)
        // which we cannot decode, and in those situations, we will need
        // to retransmit the request without SDCH, which is illegal for a POST.
        request()->method() != "POST" &&
        sdch_manager->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      sdch_manager->GetAvailDictionaryList(request_->url(),
                                           &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          sdch_manager->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

// Asynchronously loads cookies for the request URL (unless disallowed by
// load flags or a missing cookie store) and then starts the transaction.
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = GetCookieStore();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    cookie_store->GetAllCookiesForURLAsync(
        request_->url(),
        base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                   weak_factory_.GetWeakPtr()));
  } else {
    DoStartTransaction();
  }
}

// Fetches the serialized cookie line (including HttpOnly cookies) for the
// request URL; OnCookiesLoaded attaches it to the headers.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  GetCookieStore()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

// Consults the cookie policy (CanGetCookies) before actually loading the
// cookie line; falls through to starting the transaction either way.
void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

// Attaches the loaded cookie line to the outgoing headers, then starts the
// transaction.
void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
    // Disable privacy mode as we are sending cookies anyway.
    request_info_.privacy_mode = PRIVACY_MODE_DISABLED;
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

// Completion of the NotifyHeadersReceived delegate call: extracts response
// cookies and the Date header, then begins persisting cookies one by one.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // The server Date header is used as the reference clock for cookie
  // expiration (CookieOptions::set_server_time below).
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      GetCookieStore() && response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        GetCookieStore()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // All cookies handled synchronously (or none to save): finish up now.
  // Otherwise OnCookieSaved resumes the iteration asynchronously.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

// Collects every non-empty Set-Cookie response header value into |cookies|.
void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}

void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HPKP headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
  //
  //   If a UA receives more than one PKP header field in an HTTP
  //   response message over secure transport, then the UA MUST process
  //   only the first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
}

// Completion of HttpTransaction::Start (or a restart). Dispatches on
// |result|: success goes through the NotifyHeadersReceived delegate hook and
// cookie saving; certificate errors, client-cert requests, and other
// failures are surfaced to the URLRequest.
void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the job is done (due to cancellation), can just ignore this
  // notification.
  if (done_)
    return;

  receive_headers_end_ = base::TimeTicks::Now();

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  // A pinning failure that still produced response info means we talked to a
  // server presenting an unexpected certificate; report it for fraud
  // analysis.
  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    if (transaction_ && transaction_->GetResponseInfo()) {
      SetProxyServer(transaction_->GetResponseInfo()->proxy_server);
    }
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      OnCallToDelegate();
      allowed_unsafe_redirect_url_ = GURL();
      int error = network_delegate()->NotifyHeadersReceived(
          request_,
          on_headers_received_callback_,
          headers.get(),
          &override_response_headers_,
          &allowed_unsafe_redirect_url_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          OnCallToDelegateComplete();
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error.
    if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY ||
        result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) {
      // These are hard failures. They're handled separately and don't have
      // the correct cert status, so set it here.
      SSLInfo info(transaction_->GetResponseInfo()->ssl_info);
      info.cert_status = MapNetErrorToCertStatus(result);
      NotifySSLCertificateError(info, true);
    } else {
      // Maybe overridable, maybe not. Ask the delegate to decide.
      const URLRequestContext* context = request_->context();
      TransportSecurityState* state = context->transport_security_state();
      const bool fatal =
          state &&
          state->ShouldSSLErrorsBeFatal(
              request_info_.url.host(),
              SSLConfigService::IsSNIAvailable(context->ssl_config_service()));
      NotifySSLCertificateError(
          transaction_->GetResponseInfo()->ssl_info, fatal);
    }
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info.get());
  } else {
    // Even on an error, there may be useful information in the response
    // info (e.g. whether there's a cached copy).
    if (transaction_.get())
      response_info_ = transaction_->GetResponseInfo();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

// Async completion of NetworkDelegate::NotifyHeadersReceived.
void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

// Completion of an async transaction read: maps the byte count / error into
// request status and forwards it to the URLRequest.
void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    // Zero bytes read == EOF; the request is done.
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

// Restarts the current transaction with the given credentials, re-running
// the cookie load so a session cookie set by the 401/407 is picked up.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

// Reports the response MIME type; false before headers are available.
bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

// Reports the response charset; false before headers are available.
bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

// Copies the response info out, substituting any headers the network
// delegate overrode in NotifyHeadersReceived.
void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);

  if (response_info_) {
    DCHECK(transaction_.get());

    *info = *response_info_;
    if (override_response_headers_.get())
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  // If haven't made it far enough to receive any headers, don't return
  // anything. This makes for more consistent behavior in the case of errors.
  if (!transaction_ || receive_headers_end_.is_null())
    return;
  if (transaction_->GetLoadTimingInfo(load_timing_info))
    load_timing_info->receive_headers_end = receive_headers_end_;
}

bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.
1000 1001 cookies->clear(); 1002 FetchResponseCookies(cookies); 1003 return true; 1004} 1005 1006int URLRequestHttpJob::GetResponseCode() const { 1007 DCHECK(transaction_.get()); 1008 1009 if (!response_info_) 1010 return -1; 1011 1012 return GetResponseHeaders()->response_code(); 1013} 1014 1015Filter* URLRequestHttpJob::SetupFilter() const { 1016 DCHECK(transaction_.get()); 1017 if (!response_info_) 1018 return NULL; 1019 1020 std::vector<Filter::FilterType> encoding_types; 1021 std::string encoding_type; 1022 HttpResponseHeaders* headers = GetResponseHeaders(); 1023 void* iter = NULL; 1024 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) { 1025 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); 1026 } 1027 1028 if (filter_context_->IsSdchResponse()) { 1029 // We are wary of proxies that discard or damage SDCH encoding. If a server 1030 // explicitly states that this is not SDCH content, then we can correct our 1031 // assumption that this is an SDCH response, and avoid the need to recover 1032 // as though the content is corrupted (when we discover it is not SDCH 1033 // encoded). 1034 std::string sdch_response_status; 1035 iter = NULL; 1036 while (headers->EnumerateHeader(&iter, "X-Sdch-Encode", 1037 &sdch_response_status)) { 1038 if (sdch_response_status == "0") { 1039 filter_context_->ResetSdchResponseToFalse(); 1040 break; 1041 } 1042 } 1043 } 1044 1045 // Even if encoding types are empty, there is a chance that we need to add 1046 // some decoding, as some proxies strip encoding completely. In such cases, 1047 // we may need to add (for example) SDCH filtering (when the context suggests 1048 // it is appropriate). 1049 Filter::FixupEncodingTypes(*filter_context_, &encoding_types); 1050 1051 return !encoding_types.empty() 1052 ? 
Filter::Factory(encoding_types, *filter_context_) : NULL; 1053} 1054 1055bool URLRequestHttpJob::CopyFragmentOnRedirect(const GURL& location) const { 1056 // Allow modification of reference fragments by default, unless 1057 // |allowed_unsafe_redirect_url_| is set and equal to the redirect URL. 1058 // When this is the case, we assume that the network delegate has set the 1059 // desired redirect URL (with or without fragment), so it must not be changed 1060 // any more. 1061 return !allowed_unsafe_redirect_url_.is_valid() || 1062 allowed_unsafe_redirect_url_ != location; 1063} 1064 1065bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { 1066 // HTTP is always safe. 1067 // TODO(pauljensen): Remove once crbug.com/146591 is fixed. 1068 if (location.is_valid() && 1069 (location.scheme() == "http" || location.scheme() == "https")) { 1070 return true; 1071 } 1072 // Delegates may mark a URL as safe for redirection. 1073 if (allowed_unsafe_redirect_url_.is_valid() && 1074 allowed_unsafe_redirect_url_ == location) { 1075 return true; 1076 } 1077 // Query URLRequestJobFactory as to whether |location| would be safe to 1078 // redirect to. 1079 return request_->context()->job_factory() && 1080 request_->context()->job_factory()->IsSafeRedirectTarget(location); 1081} 1082 1083bool URLRequestHttpJob::NeedsAuth() { 1084 int code = GetResponseCode(); 1085 if (code == -1) 1086 return false; 1087 1088 // Check if we need either Proxy or WWW Authentication. This could happen 1089 // because we either provided no auth info, or provided incorrect info. 
1090 switch (code) { 1091 case 407: 1092 if (proxy_auth_state_ == AUTH_STATE_CANCELED) 1093 return false; 1094 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; 1095 return true; 1096 case 401: 1097 if (server_auth_state_ == AUTH_STATE_CANCELED) 1098 return false; 1099 server_auth_state_ = AUTH_STATE_NEED_AUTH; 1100 return true; 1101 } 1102 return false; 1103} 1104 1105void URLRequestHttpJob::GetAuthChallengeInfo( 1106 scoped_refptr<AuthChallengeInfo>* result) { 1107 DCHECK(transaction_.get()); 1108 DCHECK(response_info_); 1109 1110 // sanity checks: 1111 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || 1112 server_auth_state_ == AUTH_STATE_NEED_AUTH); 1113 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) || 1114 (GetResponseHeaders()->response_code() == 1115 HTTP_PROXY_AUTHENTICATION_REQUIRED)); 1116 1117 *result = response_info_->auth_challenge; 1118} 1119 1120void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) { 1121 DCHECK(transaction_.get()); 1122 1123 // Proxy gets set first, then WWW. 1124 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1125 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; 1126 } else { 1127 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1128 server_auth_state_ = AUTH_STATE_HAVE_AUTH; 1129 } 1130 1131 RestartTransactionWithAuth(credentials); 1132} 1133 1134void URLRequestHttpJob::CancelAuth() { 1135 // Proxy gets set first, then WWW. 1136 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { 1137 proxy_auth_state_ = AUTH_STATE_CANCELED; 1138 } else { 1139 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); 1140 server_auth_state_ = AUTH_STATE_CANCELED; 1141 } 1142 1143 // These will be reset in OnStartCompleted. 1144 response_info_ = NULL; 1145 receive_headers_end_ = base::TimeTicks::Now(); 1146 response_cookies_.clear(); 1147 1148 ResetTimer(); 1149 1150 // OK, let the consumer read the error page... 
1151 // 1152 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, 1153 // which will cause the consumer to receive OnResponseStarted instead of 1154 // OnAuthRequired. 1155 // 1156 // We have to do this via InvokeLater to avoid "recursing" the consumer. 1157 // 1158 base::MessageLoop::current()->PostTask( 1159 FROM_HERE, 1160 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1161 weak_factory_.GetWeakPtr(), OK)); 1162} 1163 1164void URLRequestHttpJob::ContinueWithCertificate( 1165 X509Certificate* client_cert) { 1166 DCHECK(transaction_.get()); 1167 1168 DCHECK(!response_info_) << "should not have a response yet"; 1169 receive_headers_end_ = base::TimeTicks(); 1170 1171 ResetTimer(); 1172 1173 // No matter what, we want to report our status as IO pending since we will 1174 // be notifying our consumer asynchronously via OnStartCompleted. 1175 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1176 1177 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_); 1178 if (rv == ERR_IO_PENDING) 1179 return; 1180 1181 // The transaction started synchronously, but we need to notify the 1182 // URLRequest delegate via the message loop. 1183 base::MessageLoop::current()->PostTask( 1184 FROM_HERE, 1185 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1186 weak_factory_.GetWeakPtr(), rv)); 1187} 1188 1189void URLRequestHttpJob::ContinueDespiteLastError() { 1190 // If the transaction was destroyed, then the job was cancelled. 1191 if (!transaction_.get()) 1192 return; 1193 1194 DCHECK(!response_info_) << "should not have a response yet"; 1195 receive_headers_end_ = base::TimeTicks(); 1196 1197 ResetTimer(); 1198 1199 // No matter what, we want to report our status as IO pending since we will 1200 // be notifying our consumer asynchronously via OnStartCompleted. 
1201 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1202 1203 int rv = transaction_->RestartIgnoringLastError(start_callback_); 1204 if (rv == ERR_IO_PENDING) 1205 return; 1206 1207 // The transaction started synchronously, but we need to notify the 1208 // URLRequest delegate via the message loop. 1209 base::MessageLoop::current()->PostTask( 1210 FROM_HERE, 1211 base::Bind(&URLRequestHttpJob::OnStartCompleted, 1212 weak_factory_.GetWeakPtr(), rv)); 1213} 1214 1215void URLRequestHttpJob::ResumeNetworkStart() { 1216 DCHECK(transaction_.get()); 1217 transaction_->ResumeNetworkStart(); 1218} 1219 1220bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { 1221 // Some servers send the body compressed, but specify the content length as 1222 // the uncompressed size. Although this violates the HTTP spec we want to 1223 // support it (as IE and FireFox do), but *only* for an exact match. 1224 // See http://crbug.com/79694. 1225 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH || 1226 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) { 1227 if (request_ && request_->response_headers()) { 1228 int64 expected_length = request_->response_headers()->GetContentLength(); 1229 VLOG(1) << __FUNCTION__ << "() " 1230 << "\"" << request_->url().spec() << "\"" 1231 << " content-length = " << expected_length 1232 << " pre total = " << prefilter_bytes_read() 1233 << " post total = " << postfilter_bytes_read(); 1234 if (postfilter_bytes_read() == expected_length) { 1235 // Clear the error. 
1236 return true; 1237 } 1238 } 1239 } 1240 return false; 1241} 1242 1243bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, 1244 int* bytes_read) { 1245 DCHECK_NE(buf_size, 0); 1246 DCHECK(bytes_read); 1247 DCHECK(!read_in_progress_); 1248 1249 int rv = transaction_->Read( 1250 buf, buf_size, 1251 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this))); 1252 1253 if (ShouldFixMismatchedContentLength(rv)) 1254 rv = 0; 1255 1256 if (rv >= 0) { 1257 *bytes_read = rv; 1258 if (!rv) 1259 DoneWithRequest(FINISHED); 1260 return true; 1261 } 1262 1263 if (rv == ERR_IO_PENDING) { 1264 read_in_progress_ = true; 1265 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); 1266 } else { 1267 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 1268 } 1269 1270 return false; 1271} 1272 1273void URLRequestHttpJob::StopCaching() { 1274 if (transaction_.get()) 1275 transaction_->StopCaching(); 1276} 1277 1278bool URLRequestHttpJob::GetFullRequestHeaders( 1279 HttpRequestHeaders* headers) const { 1280 if (!transaction_) 1281 return false; 1282 1283 return transaction_->GetFullRequestHeaders(headers); 1284} 1285 1286int64 URLRequestHttpJob::GetTotalReceivedBytes() const { 1287 if (!transaction_) 1288 return 0; 1289 1290 return transaction_->GetTotalReceivedBytes(); 1291} 1292 1293void URLRequestHttpJob::DoneReading() { 1294 if (transaction_) { 1295 transaction_->DoneReading(); 1296 } 1297 DoneWithRequest(FINISHED); 1298} 1299 1300void URLRequestHttpJob::DoneReadingRedirectResponse() { 1301 if (transaction_) { 1302 if (transaction_->GetResponseInfo()->headers->IsRedirect(NULL)) { 1303 // If the original headers indicate a redirect, go ahead and cache the 1304 // response, even if the |override_response_headers_| are a redirect to 1305 // another location. 1306 transaction_->DoneReading(); 1307 } else { 1308 // Otherwise, |override_response_headers_| must be non-NULL and contain 1309 // bogus headers indicating a redirect. 
1310 DCHECK(override_response_headers_); 1311 DCHECK(override_response_headers_->IsRedirect(NULL)); 1312 transaction_->StopCaching(); 1313 } 1314 } 1315 DoneWithRequest(FINISHED); 1316} 1317 1318HostPortPair URLRequestHttpJob::GetSocketAddress() const { 1319 return response_info_ ? response_info_->socket_address : HostPortPair(); 1320} 1321 1322void URLRequestHttpJob::RecordTimer() { 1323 if (request_creation_time_.is_null()) { 1324 NOTREACHED() 1325 << "The same transaction shouldn't start twice without new timing."; 1326 return; 1327 } 1328 1329 base::TimeDelta to_start = base::Time::Now() - request_creation_time_; 1330 request_creation_time_ = base::Time(); 1331 1332 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); 1333} 1334 1335void URLRequestHttpJob::ResetTimer() { 1336 if (!request_creation_time_.is_null()) { 1337 NOTREACHED() 1338 << "The timer was reset before it was recorded."; 1339 return; 1340 } 1341 request_creation_time_ = base::Time::Now(); 1342} 1343 1344void URLRequestHttpJob::UpdatePacketReadTimes() { 1345 if (!packet_timing_enabled_) 1346 return; 1347 1348 if (filter_input_byte_count() <= bytes_observed_in_packets_) { 1349 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); 1350 return; // No new bytes have arrived. 1351 } 1352 1353 final_packet_time_ = base::Time::Now(); 1354 if (!bytes_observed_in_packets_) 1355 request_time_snapshot_ = request_ ? 
request_->request_time() : base::Time(); 1356 1357 bytes_observed_in_packets_ = filter_input_byte_count(); 1358} 1359 1360void URLRequestHttpJob::RecordPacketStats( 1361 FilterContext::StatisticSelector statistic) const { 1362 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) 1363 return; 1364 1365 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; 1366 switch (statistic) { 1367 case FilterContext::SDCH_DECODE: { 1368 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", 1369 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); 1370 return; 1371 } 1372 case FilterContext::SDCH_PASSTHROUGH: { 1373 // Despite advertising a dictionary, we handled non-sdch compressed 1374 // content. 1375 return; 1376 } 1377 1378 case FilterContext::SDCH_EXPERIMENT_DECODE: { 1379 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode", 1380 duration, 1381 base::TimeDelta::FromMilliseconds(20), 1382 base::TimeDelta::FromMinutes(10), 100); 1383 return; 1384 } 1385 case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { 1386 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback", 1387 duration, 1388 base::TimeDelta::FromMilliseconds(20), 1389 base::TimeDelta::FromMinutes(10), 100); 1390 return; 1391 } 1392 default: 1393 NOTREACHED(); 1394 return; 1395 } 1396} 1397 1398// The common type of histogram we use for all compression-tracking histograms. 1399#define COMPRESSION_HISTOGRAM(name, sample) \ 1400 do { \ 1401 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \ 1402 500, 1000000, 100); \ 1403 } while (0) 1404 1405void URLRequestHttpJob::RecordCompressionHistograms() { 1406 DCHECK(request_); 1407 if (!request_) 1408 return; 1409 1410 if (is_cached_content_ || // Don't record cached content 1411 !GetStatus().is_success() || // Don't record failed content 1412 !IsCompressibleContent() || // Only record compressible content 1413 !prefilter_bytes_read()) // Zero-byte responses aren't useful. 
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

// True when the response's MIME type is one we expect servers to compress
// (javascript or another supported non-image type).
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

// Records total-time UMA histograms for the job, split by success/cancel and
// cache hit/miss. Clears |start_time_| so each job is recorded only once.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  // Prefetches that actually hit the network get a byte-count histogram too.
  if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork", 1503 prefilter_bytes_read()); 1504 1505 start_time_ = base::TimeTicks(); 1506} 1507 1508void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { 1509 if (done_) 1510 return; 1511 done_ = true; 1512 RecordPerfHistograms(reason); 1513 if (reason == FINISHED) { 1514 request_->set_received_response_content_length(prefilter_bytes_read()); 1515 RecordCompressionHistograms(); 1516 } 1517} 1518 1519HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const { 1520 DCHECK(transaction_.get()); 1521 DCHECK(transaction_->GetResponseInfo()); 1522 return override_response_headers_.get() ? 1523 override_response_headers_.get() : 1524 transaction_->GetResponseInfo()->headers.get(); 1525} 1526 1527void URLRequestHttpJob::NotifyURLRequestDestroyed() { 1528 awaiting_callback_ = false; 1529} 1530 1531} // namespace net 1532