// url_request_http_job.cc revision c5cede9ae108bb15f6b7a8aea21c7e1fefa2834c
1// Copyright (c) 2012 The Chromium Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include "net/url_request/url_request_http_job.h" 6 7#include "base/base_switches.h" 8#include "base/bind.h" 9#include "base/bind_helpers.h" 10#include "base/command_line.h" 11#include "base/compiler_specific.h" 12#include "base/file_version_info.h" 13#include "base/message_loop/message_loop.h" 14#include "base/metrics/field_trial.h" 15#include "base/metrics/histogram.h" 16#include "base/rand_util.h" 17#include "base/strings/string_util.h" 18#include "base/time/time.h" 19#include "net/base/host_port_pair.h" 20#include "net/base/load_flags.h" 21#include "net/base/mime_util.h" 22#include "net/base/net_errors.h" 23#include "net/base/net_util.h" 24#include "net/base/network_delegate.h" 25#include "net/base/sdch_manager.h" 26#include "net/cert/cert_status_flags.h" 27#include "net/cookies/cookie_store.h" 28#include "net/http/http_content_disposition.h" 29#include "net/http/http_network_session.h" 30#include "net/http/http_request_headers.h" 31#include "net/http/http_response_headers.h" 32#include "net/http/http_response_info.h" 33#include "net/http/http_status_code.h" 34#include "net/http/http_transaction.h" 35#include "net/http/http_transaction_factory.h" 36#include "net/http/http_util.h" 37#include "net/ssl/ssl_cert_request_info.h" 38#include "net/ssl/ssl_config_service.h" 39#include "net/url_request/fraudulent_certificate_reporter.h" 40#include "net/url_request/http_user_agent_settings.h" 41#include "net/url_request/url_request.h" 42#include "net/url_request/url_request_context.h" 43#include "net/url_request/url_request_error_job.h" 44#include "net/url_request/url_request_job_factory.h" 45#include "net/url_request/url_request_redirect_job.h" 46#include "net/url_request/url_request_throttler_header_adapter.h" 47#include "net/url_request/url_request_throttler_manager.h" 48#include 
"net/websockets/websocket_handshake_stream_base.h"

// Request header used to advertise the locally available SDCH dictionaries
// to the server (set in AddExtraHeaders()).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

// Adapter that exposes URLRequestHttpJob state through the FilterContext
// interface, so content-decoding filters (gzip/deflate/SDCH) can query the
// job without depending on its concrete type.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual bool GetContentDisposition(std::string* disposition) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  // Non-owning back-pointer; the job owns this context, so |job_| outlives it.
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  // The URLRequest may already have been detached from the job.
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

bool URLRequestHttpJob::HttpFilterContext::GetContentDisposition(
    std::string* disposition) const {
  HttpResponseHeaders* headers = job_->GetResponseHeaders();
  void *iter = NULL;
  return headers->EnumerateHeader(&iter, "Content-Disposition", disposition);
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ?
      job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" ||
         scheme == "wss");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  // If HSTS requires an upgrade to https, substitute a redirect job for the
  // network transaction.
  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT, "HSTS");
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      // base::Unretained is safe for these callbacks: the transaction that
      // invokes them is owned by this job and destroyed before it.
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      weak_factory_(this),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_user_agent_settings_(http_user_agent_settings) {
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  // A job can be in the test group, the control group, or neither, but not
  // both; cached responses are excluded from the SDCH experiment stats.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}

// Records the new priority and forwards it to the live transaction, if any.
void URLRequestHttpJob::SetPriority(RequestPriority priority) {
  priority_ = priority;
  if (transaction_)
    transaction_->SetPriority(priority_);
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      PRIVACY_MODE_ENABLED : PRIVACY_MODE_DISABLED;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent() : std::string());

  AddExtraHeaders();
  // Start() continues asynchronously from AddCookieHeaderAndStart().
  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  // Drop any pending cookie/start callbacks so they cannot fire into a dead
  // job, then tear down the transaction before notifying the base class.
  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

// Records end-of-request stats before forwarding the final status upward.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  // |response_info_| pointed into the transaction we just destroyed.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}

// Gives the network delegate a chance to veto or modify the request headers
// before the transaction is (re)started.
void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

// Invoked asynchronously when the delegate finishes OnBeforeSendHeaders.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    // The delegate rejected the request; log the cancellation and fail the
    // start.
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_);

    if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
      // WebSocket URLs are only valid when the caller attached a handshake
      // stream helper via URLRequest user data; otherwise refuse the scheme.
      // TODO(ricea): Implement WebSocket throttling semantics as defined in
      // RFC6455 Section 4.1.
      base::SupportsUserData::Data* data = request_->GetUserData(
          WebSocketHandshakeStreamBase::CreateHelper::DataKey());
      if (data) {
        transaction_->SetWebSocketHandshakeStreamCreateHelper(
            static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
      } else {
        rv = ERR_DISALLOWED_URL_SCHEME;
      }
    }

    if (rv == OK) {
      transaction_->SetBeforeNetworkStartCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart,
                     base::Unretained(this)));

      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}

void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = GetCookieStore();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    // Fetch the full cookie list first so the policy check (CanGetCookies)
    // can run before any Cookie header is built.
    cookie_store->GetAllCookiesForURLAsync(
        request_->url(),
        base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                   weak_factory_.GetWeakPtr()));
  } else {
    DoStartTransaction();
  }
}

// Asynchronously fetches the serialized Cookie header value for the request
// URL; continues in OnCookiesLoaded.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  GetCookieStore()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

// If policy allows sending these cookies, load them; otherwise start the
// transaction without a Cookie header.
void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
    // Disable privacy mode as we are sending cookies anyway.
    request_info_.privacy_mode = PRIVACY_MODE_DISABLED;
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

// Runs after the delegate's OnHeadersReceived step: extracts Set-Cookie
// values from the response and kicks off the (possibly async) save loop.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // The response Date header is used as the server-time baseline for cookie
  // expiry; fall back to a null Time when absent or unparsable.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      GetCookieStore() && response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        GetCookieStore()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // No save is pending, so the cookie pass is finished; clear state and
  // surface the headers to the consumer.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
689void URLRequestHttpJob::OnCookieSaved( 690 scoped_refptr<SharedBoolean> save_next_cookie_running, 691 scoped_refptr<SharedBoolean> callback_pending, 692 bool cookie_status) { 693 callback_pending->data = false; 694 695 // If we were called synchronously, return. 696 if (save_next_cookie_running->data) { 697 return; 698 } 699 700 // We were called asynchronously, so trigger the next save. 701 // We may have been canceled within OnSetCookie. 702 if (GetStatus().is_success()) { 703 SaveNextCookie(); 704 } else { 705 NotifyCanceled(); 706 } 707} 708 709void URLRequestHttpJob::FetchResponseCookies( 710 std::vector<std::string>* cookies) { 711 const std::string name = "Set-Cookie"; 712 std::string value; 713 714 void* iter = NULL; 715 HttpResponseHeaders* headers = GetResponseHeaders(); 716 while (headers->EnumerateHeader(&iter, name, &value)) { 717 if (!value.empty()) 718 cookies->push_back(value); 719 } 720} 721 722// NOTE: |ProcessStrictTransportSecurityHeader| and 723// |ProcessPublicKeyPinsHeader| have very similar structures, by design. 724void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { 725 DCHECK(response_info_); 726 TransportSecurityState* security_state = 727 request_->context()->transport_security_state(); 728 const SSLInfo& ssl_info = response_info_->ssl_info; 729 730 // Only accept HSTS headers on HTTPS connections that have no 731 // certificate errors. 732 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 733 !security_state) 734 return; 735 736 // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec: 737 // 738 // If a UA receives more than one STS header field in a HTTP response 739 // message over secure transport, then the UA MUST process only the 740 // first such header field. 
741 HttpResponseHeaders* headers = GetResponseHeaders(); 742 std::string value; 743 if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value)) 744 security_state->AddHSTSHeader(request_info_.url.host(), value); 745} 746 747void URLRequestHttpJob::ProcessPublicKeyPinsHeader() { 748 DCHECK(response_info_); 749 TransportSecurityState* security_state = 750 request_->context()->transport_security_state(); 751 const SSLInfo& ssl_info = response_info_->ssl_info; 752 753 // Only accept HPKP headers on HTTPS connections that have no 754 // certificate errors. 755 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || 756 !security_state) 757 return; 758 759 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning: 760 // 761 // If a UA receives more than one PKP header field in an HTTP 762 // response message over secure transport, then the UA MUST process 763 // only the first such header field. 764 HttpResponseHeaders* headers = GetResponseHeaders(); 765 std::string value; 766 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value)) 767 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info); 768} 769 770void URLRequestHttpJob::OnStartCompleted(int result) { 771 RecordTimer(); 772 773 // If the request was destroyed, then there is no more work to do. 774 if (!request_) 775 return; 776 777 // If the job is done (due to cancellation), can just ignore this 778 // notification. 
779 if (done_) 780 return; 781 782 receive_headers_end_ = base::TimeTicks::Now(); 783 784 // Clear the IO_PENDING status 785 SetStatus(URLRequestStatus()); 786 787 const URLRequestContext* context = request_->context(); 788 789 if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN && 790 transaction_->GetResponseInfo() != NULL) { 791 FraudulentCertificateReporter* reporter = 792 context->fraudulent_certificate_reporter(); 793 if (reporter != NULL) { 794 const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info; 795 bool sni_available = SSLConfigService::IsSNIAvailable( 796 context->ssl_config_service()); 797 const std::string& host = request_->url().host(); 798 799 reporter->SendReport(host, ssl_info, sni_available); 800 } 801 } 802 803 if (result == OK) { 804 scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders(); 805 if (network_delegate()) { 806 // Note that |this| may not be deleted until 807 // |on_headers_received_callback_| or 808 // |NetworkDelegate::URLRequestDestroyed()| has been called. 809 OnCallToDelegate(); 810 allowed_unsafe_redirect_url_ = GURL(); 811 int error = network_delegate()->NotifyHeadersReceived( 812 request_, 813 on_headers_received_callback_, 814 headers.get(), 815 &override_response_headers_, 816 &allowed_unsafe_redirect_url_); 817 if (error != net::OK) { 818 if (error == net::ERR_IO_PENDING) { 819 awaiting_callback_ = true; 820 } else { 821 std::string source("delegate"); 822 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, 823 NetLog::StringCallback("source", 824 &source)); 825 OnCallToDelegateComplete(); 826 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error)); 827 } 828 return; 829 } 830 } 831 832 SaveCookiesAndNotifyHeadersComplete(net::OK); 833 } else if (IsCertificateError(result)) { 834 // We encountered an SSL certificate error. 835 if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY || 836 result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) { 837 // These are hard failures. 
They're handled separately and don't have 838 // the correct cert status, so set it here. 839 SSLInfo info(transaction_->GetResponseInfo()->ssl_info); 840 info.cert_status = MapNetErrorToCertStatus(result); 841 NotifySSLCertificateError(info, true); 842 } else { 843 // Maybe overridable, maybe not. Ask the delegate to decide. 844 TransportSecurityState::DomainState domain_state; 845 const URLRequestContext* context = request_->context(); 846 const bool fatal = context->transport_security_state() && 847 context->transport_security_state()->GetDomainState( 848 request_info_.url.host(), 849 SSLConfigService::IsSNIAvailable(context->ssl_config_service()), 850 &domain_state) && 851 domain_state.ShouldSSLErrorsBeFatal(); 852 NotifySSLCertificateError( 853 transaction_->GetResponseInfo()->ssl_info, fatal); 854 } 855 } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) { 856 NotifyCertificateRequested( 857 transaction_->GetResponseInfo()->cert_request_info.get()); 858 } else { 859 // Even on an error, there may be useful information in the response 860 // info (e.g. whether there's a cached copy). 861 if (transaction_.get()) 862 response_info_ = transaction_->GetResponseInfo(); 863 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); 864 } 865} 866 867void URLRequestHttpJob::OnHeadersReceivedCallback(int result) { 868 awaiting_callback_ = false; 869 870 // Check that there are no callbacks to already canceled requests. 
// NOTE(review): continuation of OnHeadersReceivedCallback(); the function
// header is in the preceding chunk.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

// Invoked by the transaction when an asynchronous Read() completes.
// |result| > 0 is a byte count, 0 is EOF, < 0 is a net error.
void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  // An exact content-length mismatch at end-of-body is treated as clean EOF
  // (see ShouldFixMismatchedContentLength()).
  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

// Restarts the transaction with credentials supplied by the consumer after a
// 401/407 challenge.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

// Returns false until response headers have been received.
bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

// Returns false until response headers have been received.
bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);

  if (response_info_) {
    DCHECK(transaction_.get());

    *info = *response_info_;
    // Surface delegate-overridden headers when present.
    if (override_response_headers_.get())
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  // If haven't made it far enough to receive any headers, don't return
  // anything. This makes for more consistent behavior in the case of errors.
  if (!transaction_ || receive_headers_end_.is_null())
    return;
  if (transaction_->GetLoadTimingInfo(load_timing_info))
    load_timing_info->receive_headers_end = receive_headers_end_;
}

bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.
// NOTE(review): continuation of GetResponseCookies(); the function header is
// in the preceding chunk.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

// Returns the HTTP status code, or -1 before headers have been received.
int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

// Builds the content-decoding filter chain from the Content-Encoding response
// headers (plus SDCH fix-ups). Returns NULL when no filtering is required.
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::CopyFragmentOnRedirect(const GURL& location) const {
  // Allow modification of reference fragments by default, unless
  // |allowed_unsafe_redirect_url_| is set and equal to the redirect URL.
  // When this is the case, we assume that the network delegate has set the
  // desired redirect URL (with or without fragment), so it must not be changed
  // any more.
  return !allowed_unsafe_redirect_url_.is_valid() ||
      allowed_unsafe_redirect_url_ != location;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // HTTP is always safe.
  // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
  if (location.is_valid() &&
      (location.scheme() == "http" || location.scheme() == "https")) {
    return true;
  }
  // Delegates may mark a URL as safe for redirection.
  if (allowed_unsafe_redirect_url_.is_valid() &&
      allowed_unsafe_redirect_url_ == location) {
    return true;
  }
  // Query URLRequestJobFactory as to whether |location| would be safe to
  // redirect to.
  return request_->context()->job_factory() &&
      request_->context()->job_factory()->IsSafeRedirectTarget(location);
}

// Returns true if the response is a 401/407 challenge that the consumer has
// not already canceled; updates the corresponding auth-state member.
bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
// NOTE(review): continuation of NeedsAuth(); the function header is in the
// preceding chunk.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  // NOTE(review): unlike RestartTransactionWithAuth(), this records Now()
  // rather than a null TimeTicks — presumably because the existing 401/407
  // response is surfaced as-is with no further header fetch; confirm the
  // asymmetry is intentional.
  receive_headers_end_ = base::TimeTicks::Now();
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

// Restarts the transaction after the consumer has chosen a client certificate
// in response to ERR_SSL_CLIENT_AUTH_CERT_NEEDED.
void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

// Restarts the transaction ignoring the last (e.g. certificate) error, after
// the consumer elected to proceed.
void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";
  receive_headers_end_ = base::TimeTicks();

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
// NOTE(review): continuation of ContinueDespiteLastError(); the function
// header is in the preceding chunk.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::ResumeNetworkStart() {
  DCHECK(transaction_.get());
  transaction_->ResumeNetworkStart();
}

// Returns true when a content-length/chunked-encoding mismatch error should
// be forgiven because the decoded byte count exactly matches the advertised
// Content-Length.
bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec we want to
  // support it (as IE and FireFox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

// Reads response body data into |buf|. Returns true on synchronous success
// with |*bytes_read| set (0 meaning EOF); returns false when the read is
// pending (OnReadCompleted() will fire later) or on failure (reported via
// NotifyDone()).
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // Forgive an exact content-length mismatch at end-of-body (see above).
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

bool URLRequestHttpJob::GetFullRequestHeaders(
    HttpRequestHeaders* headers) const {
  if (!transaction_)
    return false;

  return transaction_->GetFullRequestHeaders(headers);
}

int64 URLRequestHttpJob::GetTotalReceivedBytes() const {
  if (!transaction_)
    return 0;

  return transaction_->GetTotalReceivedBytes();
}

void URLRequestHttpJob::DoneReading() {
  if (transaction_) {
    transaction_->DoneReading();
  }
  DoneWithRequest(FINISHED);
}

// Like DoneReading(), but for a response consumed only up to its redirect
// headers; decides whether the response may still be cached.
void URLRequestHttpJob::DoneReadingRedirectResponse() {
  if (transaction_) {
    if (transaction_->GetResponseInfo()->headers->IsRedirect(NULL)) {
      // If the original headers indicate a redirect, go ahead and cache the
      // response, even if the |override_response_headers_| are a redirect to
      // another location.
      transaction_->DoneReading();
    } else {
      // Otherwise, |override_response_headers_| must be non-NULL and contain
      // bogus headers indicating a redirect.
// NOTE(review): continuation of DoneReadingRedirectResponse(); the function
// header is in the preceding chunk.
      DCHECK(override_response_headers_);
      DCHECK(override_response_headers_->IsRedirect(NULL));
      // Don't cache: the stored response would not match the redirect the
      // delegate made the caller observe.
      transaction_->StopCaching();
    }
  }
  DoneWithRequest(FINISHED);
}

HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ? response_info_->socket_address : HostPortPair();
}

// Records time-to-first-byte into UMA, once per transaction start.
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  // Null the creation time so a second RecordTimer() without an intervening
  // ResetTimer() trips the check above.
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
}

// Re-arms the timing recorded by RecordTimer(); called before restarting the
// transaction (auth, client cert, ignore-error).
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

// Tracks byte arrival for packet-timing statistics; no-op unless enabled.
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  // Snapshot the request time when the first bytes are observed.
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}

// Emits SDCH-related UMA statistics. No-op unless packet timing was enabled
// and at least one packet was observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

// Records compression effectiveness histograms for successful, compressible,
// non-cached http(s) responses.
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content
      !GetStatus().is_success() ||   // Don't record failed content
      !IsCompressibleContent() ||    // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
// NOTE(review): continuation of RecordCompressionHistograms(); the function
// header is in the preceding chunk.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

// A response counts as compressible when its MIME type is a supported
// javascript or non-image type.
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

// Records total-time UMA histograms for the job. Nulls |start_time_| after
// recording so repeated calls are no-ops.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
    UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
                         prefilter_bytes_read());

  start_time_ = base::TimeTicks();
}

// Runs at most once per job (guarded by |done_|); records histograms and, on
// successful completion, the received content length.
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}

// Returns the effective response headers: the delegate-supplied override if
// one exists, otherwise the transaction's own headers.
HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_.get() :
      transaction_->GetResponseInfo()->headers.get();
}

void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

}  // namespace net