reliable_quic_stream.cc revision 46d4c2bc3267f3f028f39e7e311b0f89aba2e4fd
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/reliable_quic_stream.h"

#include "base/logging.h"
#include "net/quic/iovector.h"
#include "net/quic/quic_flow_controller.h"
#include "net/quic/quic_session.h"
#include "net/quic/quic_write_blocked_list.h"

using base::StringPiece;
using std::min;

namespace net {

#define ENDPOINT (is_server_ ? "Server: " : " Client: ")

namespace {

// Wraps |data| in a single iovec without copying the underlying bytes;
// the iovec is only valid while |data|'s backing storage is alive.
struct iovec MakeIovec(StringPiece data) {
  struct iovec iov = {const_cast<char*>(data.data()),
                      static_cast<size_t>(data.size())};
  return iov;
}

}  // namespace

// Wrapper that aggregates OnAckNotifications for packets sent using
// WriteOrBufferData and delivers them to the original
// QuicAckNotifier::DelegateInterface after all bytes written using
// WriteOrBufferData are acked.  This level of indirection is
// necessary because the delegate interface provides no mechanism that
// WriteOrBufferData can use to inform it that the write required
// multiple WritevData calls or that only part of the data has been
// sent out by the time ACKs start arriving.
class ReliableQuicStream::ProxyAckNotifierDelegate
    : public QuicAckNotifier::DelegateInterface {
 public:
  explicit ProxyAckNotifierDelegate(DelegateInterface* delegate)
      : delegate_(delegate),
        pending_acks_(0),
        wrote_last_data_(false),
        num_original_packets_(0),
        num_original_bytes_(0),
        num_retransmitted_packets_(0),
        num_retransmitted_bytes_(0) {
  }

  // Invoked once per outstanding WritevData call as its data is fully acked.
  // Accumulates the per-call counters and, once the final write's ack has
  // arrived (wrote_last_data_ && pending_acks_ == 0), forwards a single
  // aggregated notification to the wrapped delegate.
  virtual void OnAckNotification(int num_original_packets,
                                 int num_original_bytes,
                                 int num_retransmitted_packets,
                                 int num_retransmitted_bytes,
                                 QuicTime::Delta delta_largest_observed)
      OVERRIDE {
    DCHECK_LT(0, pending_acks_);
    --pending_acks_;
    num_original_packets_ += num_original_packets;
    num_original_bytes_ += num_original_bytes;
    num_retransmitted_packets_ += num_retransmitted_packets;
    num_retransmitted_bytes_ += num_retransmitted_bytes;

    if (wrote_last_data_ && pending_acks_ == 0) {
      // NOTE: only the delta_largest_observed from this (final) notification
      // is forwarded; deltas from earlier notifications are dropped.
      delegate_->OnAckNotification(num_original_packets_,
                                   num_original_bytes_,
                                   num_retransmitted_packets_,
                                   num_retransmitted_bytes_,
                                   delta_largest_observed);
    }
  }

  // Records that another WritevData call consumed data; |last_data| is true
  // when that call wrote the final bytes (and any fin) of the payload.
  // Must not be called again after being called with last_data == true.
  void WroteData(bool last_data) {
    DCHECK(!wrote_last_data_);
    ++pending_acks_;
    wrote_last_data_ = last_data;
  }

 protected:
  // Delegates are ref counted.
  virtual ~ProxyAckNotifierDelegate() {
  }

 private:
  // Original delegate.  delegate_->OnAckNotification will be called when:
  //   wrote_last_data_ == true and pending_acks_ == 0
  scoped_refptr<DelegateInterface> delegate_;

  // Number of outstanding acks.
  int pending_acks_;

  // True if no pending writes remain.
  bool wrote_last_data_;

  // Accumulators for the aggregated notification.
  int num_original_packets_;
  int num_original_bytes_;
  int num_retransmitted_packets_;
  int num_retransmitted_bytes_;

  DISALLOW_COPY_AND_ASSIGN(ProxyAckNotifierDelegate);
};

ReliableQuicStream::PendingData::PendingData(
    string data_in, scoped_refptr<ProxyAckNotifierDelegate> delegate_in)
    : data(data_in), delegate(delegate_in) {
}

ReliableQuicStream::PendingData::~PendingData() {
}

ReliableQuicStream::ReliableQuicStream(QuicStreamId id, QuicSession* session)
    : sequencer_(this),
      id_(id),
      session_(session),
      stream_bytes_read_(0),
      stream_bytes_written_(0),
      stream_error_(QUIC_STREAM_NO_ERROR),
      connection_error_(QUIC_NO_ERROR),
      read_side_closed_(false),
      write_side_closed_(false),
      fin_buffered_(false),
      fin_sent_(false),
      rst_sent_(false),
      is_server_(session_->is_server()),
      // Send window: use the value the peer advertised during the handshake
      // if one was received, otherwise the default.
      flow_controller_(
          session_->connection(),
          id_,
          is_server_,
          session_->config()->HasReceivedInitialFlowControlWindowBytes() ?
              session_->config()->ReceivedInitialFlowControlWindowBytes() :
              kDefaultFlowControlSendWindow,
          session_->max_flow_control_receive_window_bytes(),
          session_->max_flow_control_receive_window_bytes()),
      connection_flow_controller_(session_->flow_controller()) {
}

ReliableQuicStream::~ReliableQuicStream() {
}

// Handles an incoming stream frame.  Returns false on a hard error (wrong
// stream id, or a flow control violation that closes the connection).
bool ReliableQuicStream::OnStreamFrame(const QuicStreamFrame& frame) {
  if (read_side_closed_) {
    DVLOG(1) << ENDPOINT << "Ignoring frame " << frame.stream_id;
    // We don't want to be reading: blackhole the data.
    return true;
  }

  if (frame.stream_id != id_) {
    LOG(ERROR) << "Error!";
    return false;
  }

  // This count includes duplicate data received.
  size_t frame_payload_size = frame.data.TotalBufferSize();
  stream_bytes_read_ += frame_payload_size;

  // Flow control is interested in tracking highest received offset.
  if (MaybeIncreaseHighestReceivedOffset(frame.offset + frame_payload_size)) {
    // As the highest received offset has changed, we should check to see if
    // this is a violation of flow control.
    if (flow_controller_.FlowControlViolation() ||
        connection_flow_controller_->FlowControlViolation()) {
      session_->connection()->SendConnectionClose(
          QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA);
      return false;
    }
  }

  return sequencer_.OnStreamFrame(frame);
}

int ReliableQuicStream::num_frames_received() const {
  return sequencer_.num_frames_received();
}

int ReliableQuicStream::num_duplicate_frames_received() const {
  return sequencer_.num_duplicate_frames_received();
}

// Handles a RST_STREAM frame from the peer: records its final byte offset
// for flow control accounting, then closes both sides of the stream.
void ReliableQuicStream::OnStreamReset(const QuicRstStreamFrame& frame) {
  MaybeIncreaseHighestReceivedOffset(frame.byte_offset);

  stream_error_ = frame.error_code;
  CloseWriteSide();
  CloseReadSide();
}

void ReliableQuicStream::OnConnectionClosed(QuicErrorCode error,
                                            bool from_peer) {
  if (read_side_closed_ && write_side_closed_) {
    return;
  }
  if (error != QUIC_NO_ERROR) {
    stream_error_ = QUIC_STREAM_CONNECTION_ERROR;
    connection_error_ = error;
  }

  CloseWriteSide();
  CloseReadSide();
}

void ReliableQuicStream::OnFinRead() {
  DCHECK(sequencer_.IsClosed());
  CloseReadSide();
}

// Resets the stream with |error|, telling the peer how many bytes we have
// written so far so that both sides agree on flow control accounting.
void ReliableQuicStream::Reset(QuicRstStreamErrorCode error) {
  DCHECK_NE(QUIC_STREAM_NO_ERROR, error);
  stream_error_ = error;
  // Sending a RstStream results in calling CloseStream.
  session()->SendRstStream(id(), error, stream_bytes_written_);
  rst_sent_ = true;
}

void ReliableQuicStream::CloseConnection(QuicErrorCode error) {
  session()->connection()->SendConnectionClose(error);
}

void ReliableQuicStream::CloseConnectionWithDetails(QuicErrorCode error,
                                                    const string& details) {
  session()->connection()->SendConnectionCloseWithDetails(error, details);
}

QuicVersion ReliableQuicStream::version() const {
  return session()->connection()->version();
}

// Writes |data| (and optionally |fin|), buffering whatever the connection
// cannot consume immediately in queued_data_.  If a delegate is supplied it
// is wrapped in a ProxyAckNotifierDelegate so a single aggregated ack
// notification is delivered once all of |data| has been written and acked.
void ReliableQuicStream::WriteOrBufferData(
    StringPiece data,
    bool fin,
    QuicAckNotifier::DelegateInterface* ack_notifier_delegate) {
  if (data.empty() && !fin) {
    LOG(DFATAL) << "data.empty() && !fin";
    return;
  }

  if (fin_buffered_) {
    LOG(DFATAL) << "Fin already buffered";
    return;
  }

  scoped_refptr<ProxyAckNotifierDelegate> proxy_delegate;
  if (ack_notifier_delegate != NULL) {
    proxy_delegate = new ProxyAckNotifierDelegate(ack_notifier_delegate);
  }

  QuicConsumedData consumed_data(0, false);
  fin_buffered_ = fin;

  // Only attempt an immediate write if nothing is already queued; otherwise
  // the new data must go behind the existing queue to preserve ordering.
  if (queued_data_.empty()) {
    struct iovec iov(MakeIovec(data));
    consumed_data = WritevData(&iov, 1, fin, proxy_delegate.get());
    DCHECK_LE(consumed_data.bytes_consumed, data.length());
  }

  bool write_completed;
  // If there's unconsumed data or an unconsumed fin, queue it.
  if (consumed_data.bytes_consumed < data.length() ||
      (fin && !consumed_data.fin_consumed)) {
    StringPiece remainder(data.substr(consumed_data.bytes_consumed));
    queued_data_.push_back(PendingData(remainder.as_string(), proxy_delegate));
    write_completed = false;
  } else {
    write_completed = true;
  }

  // Tell the proxy delegate a write happened only if something was actually
  // consumed; a fully-queued write generates its notifications later, from
  // OnCanWrite.
  if ((proxy_delegate.get() != NULL) &&
      (consumed_data.bytes_consumed > 0 || consumed_data.fin_consumed)) {
    proxy_delegate->WroteData(write_completed);
  }
}

// Drains queued_data_ while the connection will accept writes.  The buffered
// fin is only attached to the last queued chunk.
void ReliableQuicStream::OnCanWrite() {
  bool fin = false;
  while (!queued_data_.empty()) {
    PendingData* pending_data = &queued_data_.front();
    ProxyAckNotifierDelegate* delegate = pending_data->delegate.get();
    if (queued_data_.size() == 1 && fin_buffered_) {
      fin = true;
    }
    struct iovec iov(MakeIovec(pending_data->data));
    QuicConsumedData consumed_data = WritevData(&iov, 1, fin, delegate);
    if (consumed_data.bytes_consumed == pending_data->data.size() &&
        fin == consumed_data.fin_consumed) {
      // Chunk fully written (including any fin): drop it from the queue.
      queued_data_.pop_front();
      if (delegate != NULL) {
        delegate->WroteData(true);
      }
    } else {
      if (consumed_data.bytes_consumed > 0) {
        // Partial write: trim the consumed prefix and stop for now.
        pending_data->data.erase(0, consumed_data.bytes_consumed);
        if (delegate != NULL) {
          delegate->WroteData(false);
        }
      }
      break;
    }
  }
}

// Sends BLOCKED frames as needed at both stream and connection level.
void ReliableQuicStream::MaybeSendBlocked() {
  flow_controller_.MaybeSendBlocked();
  connection_flow_controller_->MaybeSendBlocked();
  // If we are connection level flow control blocked, then add the stream
  // to the write blocked list. It will be given a chance to write when a
  // connection level WINDOW_UPDATE arrives.
  if (connection_flow_controller_->IsBlocked() &&
      !flow_controller_.IsBlocked()) {
    session_->MarkWriteBlocked(id(), EffectivePriority());
  }
}

// Writes as much of |iov| as both the stream and connection send windows
// allow, updating flow control accounting and write-blocked state.  Returns
// how many bytes (and whether the fin) were consumed.
QuicConsumedData ReliableQuicStream::WritevData(
    const struct iovec* iov,
    int iov_count,
    bool fin,
    QuicAckNotifier::DelegateInterface* ack_notifier_delegate) {
  if (write_side_closed_) {
    DLOG(ERROR) << ENDPOINT << "Attempt to write when the write side is closed";
    return QuicConsumedData(0, false);
  }

  // How much data we want to write.
  size_t write_length = TotalIovecLength(iov, iov_count);

  // A FIN with zero data payload should not be flow control blocked.
  bool fin_with_zero_data = (fin && write_length == 0);

  if (flow_controller_.IsEnabled()) {
    // How much data we are allowed to write from flow control: the minimum
    // of the stream and connection level send windows.
    uint64 send_window = flow_controller_.SendWindowSize();
    if (connection_flow_controller_->IsEnabled()) {
      send_window =
          min(send_window, connection_flow_controller_->SendWindowSize());
    }

    if (send_window == 0 && !fin_with_zero_data) {
      // Quick return if we can't send anything.
      MaybeSendBlocked();
      return QuicConsumedData(0, false);
    }

    if (write_length > send_window) {
      // Don't send the FIN if we aren't going to send all the data.
      fin = false;

      // Writing more data would be a violation of flow control.
      write_length = send_window;
    }
  }

  // Fill an IOVector with bytes from the iovec.
  IOVector data;
  data.AppendIovecAtMostBytes(iov, iov_count, write_length);

  QuicConsumedData consumed_data = session()->WritevData(
      id(), data, stream_bytes_written_, fin, ack_notifier_delegate);
  stream_bytes_written_ += consumed_data.bytes_consumed;

  AddBytesSent(consumed_data.bytes_consumed);

  if (consumed_data.bytes_consumed == write_length) {
    if (!fin_with_zero_data) {
      MaybeSendBlocked();
    }
    if (fin && consumed_data.fin_consumed) {
      fin_sent_ = true;
      CloseWriteSide();
    } else if (fin && !consumed_data.fin_consumed) {
      // Everything but the fin was consumed: wait for another chance.
      session_->MarkWriteBlocked(id(), EffectivePriority());
    }
  } else {
    // The session consumed less than we offered: we are write blocked.
    session_->MarkWriteBlocked(id(), EffectivePriority());
  }
  return consumed_data;
}

void ReliableQuicStream::CloseReadSide() {
  if (read_side_closed_) {
    return;
  }
  DVLOG(1) << ENDPOINT << "Done reading from stream " << id();

  read_side_closed_ = true;
  // The stream is only destroyed once both directions are closed.
  if (write_side_closed_) {
    DVLOG(1) << ENDPOINT << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

void ReliableQuicStream::CloseWriteSide() {
  if (write_side_closed_) {
    return;
  }
  DVLOG(1) << ENDPOINT << "Done writing to stream " << id();

  write_side_closed_ = true;
  // The stream is only destroyed once both directions are closed.
  if (read_side_closed_) {
    DVLOG(1) << ENDPOINT << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

bool ReliableQuicStream::HasBufferedData() const {
  return !queued_data_.empty();
}

void ReliableQuicStream::OnClose() {
  CloseReadSide();
  CloseWriteSide();

  if (!fin_sent_ && !rst_sent_) {
    // For flow control accounting, we must tell the peer how many bytes we
    // have written on this stream before termination. Done here if needed,
    // using a RST frame.
    DVLOG(1) << ENDPOINT << "Sending RST in OnClose: " << id();
    session_->SendRstStream(id(), QUIC_RST_FLOW_CONTROL_ACCOUNTING,
                            stream_bytes_written_);
    rst_sent_ = true;
  }

  // We are closing the stream and will not process any further incoming
  // bytes. As there may be more bytes in flight and we need to ensure that
  // both endpoints have the same connection level flow control state, mark
  // all unreceived or buffered bytes as consumed.
  uint64 bytes_to_consume = flow_controller_.highest_received_byte_offset() -
                            flow_controller_.bytes_consumed();
  AddBytesConsumed(bytes_to_consume);
}

void ReliableQuicStream::OnWindowUpdateFrame(
    const QuicWindowUpdateFrame& frame) {
  if (!flow_controller_.IsEnabled()) {
    DLOG(DFATAL) << "Flow control not enabled! " << version();
    return;
  }

  if (flow_controller_.UpdateSendWindowOffset(frame.byte_offset)) {
    // We can write again!
    // TODO(rjshade): This does not respect priorities (e.g. multiple
    //                outstanding POSTs are unblocked on arrival of
    //                SHLO with initial window).
    // As long as the connection is not flow control blocked, we can write!
    OnCanWrite();
  }
}

// Raises the stream's highest received byte offset to |new_offset| if it is
// an increase, mirroring the increment onto the connection level flow
// controller.  Returns true if the offset actually increased.
bool ReliableQuicStream::MaybeIncreaseHighestReceivedOffset(uint64 new_offset) {
  if (flow_controller_.IsEnabled()) {
    // Computed before the update so we know how much the offset moved.
    uint64 increment =
        new_offset - flow_controller_.highest_received_byte_offset();
    if (flow_controller_.UpdateHighestReceivedOffset(new_offset)) {
      // If |new_offset| increased the stream flow controller's highest
      // received offset, then we need to increase the connection flow
      // controller's value by the incremental difference.
      connection_flow_controller_->UpdateHighestReceivedOffset(
          connection_flow_controller_->highest_received_byte_offset() +
          increment);
      return true;
    }
  }
  return false;
}

void ReliableQuicStream::AddBytesSent(uint64 bytes) {
  if (flow_controller_.IsEnabled()) {
    flow_controller_.AddBytesSent(bytes);
    connection_flow_controller_->AddBytesSent(bytes);
  }
}

void ReliableQuicStream::AddBytesConsumed(uint64 bytes) {
  if (flow_controller_.IsEnabled()) {
    // Only adjust stream level flow controller if we are still reading.
    if (!read_side_closed_) {
      flow_controller_.AddBytesConsumed(bytes);
    }

    // Connection level accounting always advances, even after read close.
    connection_flow_controller_->AddBytesConsumed(bytes);
  }
}

bool ReliableQuicStream::IsFlowControlBlocked() {
  return flow_controller_.IsBlocked() ||
         connection_flow_controller_->IsBlocked();
}

}  // namespace net