chunk_demuxer_unittest.cc revision 0de6073388f4e2780db8536178b129cd8f6ab386
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <algorithm>
6
7#include "base/bind.h"
8#include "base/message_loop/message_loop.h"
9#include "base/strings/string_number_conversions.h"
10#include "base/strings/string_split.h"
11#include "base/strings/string_util.h"
12#include "media/base/audio_decoder_config.h"
13#include "media/base/decoder_buffer.h"
14#include "media/base/decrypt_config.h"
15#include "media/base/mock_demuxer_host.h"
16#include "media/base/test_data_util.h"
17#include "media/base/test_helpers.h"
18#include "media/filters/chunk_demuxer.h"
19#include "media/formats/webm/cluster_builder.h"
20#include "media/formats/webm/webm_constants.h"
21#include "media/formats/webm/webm_crypto_helpers.h"
22#include "testing/gtest/include/gtest/gtest.h"
23
24using ::testing::AnyNumber;
25using ::testing::Exactly;
26using ::testing::InSequence;
27using ::testing::NotNull;
28using ::testing::Return;
29using ::testing::SaveArg;
30using ::testing::SetArgumentPointee;
31using ::testing::Values;
32using ::testing::_;
33
34namespace media {
35
36const uint8 kTracksHeader[] = {
37  0x16, 0x54, 0xAE, 0x6B,  // Tracks ID
38  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // tracks(size = 0)
39};
40
41// WebM Block bytes that represent a VP8 keyframe.
42const uint8 kVP8Keyframe[] = {
43  0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
44};
45
46// WebM Block bytes that represent a VP8 interframe.
47const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
48
49const int kTracksHeaderSize = sizeof(kTracksHeader);
50const int kTracksSizeOffset = 4;
51
52// The size of TrackEntry element in test file "webm_vorbis_track_entry" starts
53// at index 1 and spans 8 bytes.
54const int kAudioTrackSizeOffset = 1;
55const int kAudioTrackSizeWidth = 8;
56const int kAudioTrackEntryHeaderSize =
57    kAudioTrackSizeOffset + kAudioTrackSizeWidth;
58
59// The size of TrackEntry element in test file "webm_vp8_track_entry" starts at
60// index 1 and spans 8 bytes.
61const int kVideoTrackSizeOffset = 1;
62const int kVideoTrackSizeWidth = 8;
63const int kVideoTrackEntryHeaderSize =
64    kVideoTrackSizeOffset + kVideoTrackSizeWidth;
65
66const int kVideoTrackNum = 1;
67const int kAudioTrackNum = 2;
68const int kTextTrackNum = 3;
69
70const int kAudioBlockDuration = 23;
71const int kVideoBlockDuration = 33;
72const int kTextBlockDuration = 100;
73const int kBlockSize = 10;
74
75const char kSourceId[] = "SourceId";
76const char kDefaultFirstClusterRange[] = "{ [0,46) }";
77const int kDefaultFirstClusterEndTimestamp = 66;
78const int kDefaultSecondClusterEndTimestamp = 132;
79
80base::TimeDelta kDefaultDuration() {
81  return base::TimeDelta::FromMilliseconds(201224);
82}
83
84// Write an integer into buffer in the form of vint that spans 8 bytes.
85// The data pointed by |buffer| should be at least 8 bytes long.
86// |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
87static void WriteInt64(uint8* buffer, int64 number) {
88  DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
89  buffer[0] = 0x01;
90  int64 tmp = number;
91  for (int i = 7; i > 0; i--) {
92    buffer[i] = tmp & 0xff;
93    tmp >>= 8;
94  }
95}
96
97MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
98  return arg.get() && !arg->end_of_stream() &&
99         arg->timestamp().InMilliseconds() == timestamp_in_ms;
100}
101
102MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
103
104static void OnReadDone(const base::TimeDelta& expected_time,
105                       bool* called,
106                       DemuxerStream::Status status,
107                       const scoped_refptr<DecoderBuffer>& buffer) {
108  EXPECT_EQ(status, DemuxerStream::kOk);
109  EXPECT_EQ(expected_time, buffer->timestamp());
110  *called = true;
111}
112
113static void OnReadDone_AbortExpected(
114    bool* called, DemuxerStream::Status status,
115    const scoped_refptr<DecoderBuffer>& buffer) {
116  EXPECT_EQ(status, DemuxerStream::kAborted);
117  EXPECT_EQ(NULL, buffer.get());
118  *called = true;
119}
120
121static void OnReadDone_EOSExpected(bool* called,
122                                   DemuxerStream::Status status,
123                                   const scoped_refptr<DecoderBuffer>& buffer) {
124  EXPECT_EQ(status, DemuxerStream::kOk);
125  EXPECT_TRUE(buffer->end_of_stream());
126  *called = true;
127}
128
129static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
130  EXPECT_EQ(status, PIPELINE_OK);
131  *called = true;
132}
133
134static void LogFunc(const std::string& str) { DVLOG(1) << str; }
135
136// Test parameter determines which coded frame processor is used to process
137// appended data. If true, LegacyFrameProcessor is used. Otherwise, the new
138// FrameProcessor is used.
139class ChunkDemuxerTest : public ::testing::TestWithParam<bool> {
140 protected:
141  enum CodecsIndex {
142    AUDIO,
143    VIDEO,
144    MAX_CODECS_INDEX
145  };
146
147  // Default cluster to append first for simple tests.
148  scoped_ptr<Cluster> kDefaultFirstCluster() {
149    return GenerateCluster(0, 4);
150  }
151
152  // Default cluster to append after kDefaultFirstCluster()
153  // has been appended. This cluster starts with blocks that
154  // have timestamps consistent with the end times of the blocks
155  // in kDefaultFirstCluster() so that these two clusters represent
156  // a continuous region.
157  scoped_ptr<Cluster> kDefaultSecondCluster() {
158    return GenerateCluster(46, 66, 5);
159  }
160
161  ChunkDemuxerTest()
162      : append_window_end_for_next_append_(kInfiniteDuration()) {
163    use_legacy_frame_processor_ = GetParam();
164    CreateNewDemuxer();
165  }
166
167  void CreateNewDemuxer() {
168    base::Closure open_cb =
169        base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
170    Demuxer::NeedKeyCB need_key_cb =
171        base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
172    demuxer_.reset(
173        new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
174  }
175
176  virtual ~ChunkDemuxerTest() {
177    ShutdownDemuxer();
178  }
179
180  void CreateInitSegment(int stream_flags,
181                         bool is_audio_encrypted, bool is_video_encrypted,
182                         scoped_ptr<uint8[]>* buffer,
183                         int* size) {
184    bool has_audio = (stream_flags & HAS_AUDIO) != 0;
185    bool has_video = (stream_flags & HAS_VIDEO) != 0;
186    bool has_text = (stream_flags & HAS_TEXT) != 0;
187    scoped_refptr<DecoderBuffer> ebml_header;
188    scoped_refptr<DecoderBuffer> info;
189    scoped_refptr<DecoderBuffer> audio_track_entry;
190    scoped_refptr<DecoderBuffer> video_track_entry;
191    scoped_refptr<DecoderBuffer> audio_content_encodings;
192    scoped_refptr<DecoderBuffer> video_content_encodings;
193    scoped_refptr<DecoderBuffer> text_track_entry;
194
195    ebml_header = ReadTestDataFile("webm_ebml_element");
196
197    info = ReadTestDataFile("webm_info_element");
198
199    int tracks_element_size = 0;
200
201    if (has_audio) {
202      audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
203      tracks_element_size += audio_track_entry->data_size();
204      if (is_audio_encrypted) {
205        audio_content_encodings = ReadTestDataFile("webm_content_encodings");
206        tracks_element_size += audio_content_encodings->data_size();
207      }
208    }
209
210    if (has_video) {
211      video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
212      tracks_element_size += video_track_entry->data_size();
213      if (is_video_encrypted) {
214        video_content_encodings = ReadTestDataFile("webm_content_encodings");
215        tracks_element_size += video_content_encodings->data_size();
216      }
217    }
218
219    if (has_text) {
220      // TODO(matthewjheaney): create an abstraction to do
221      // this (http://crbug/321454).
222      // We need it to also handle the creation of multiple text tracks.
223      //
224      // This is the track entry for a text track,
225      // TrackEntry [AE], size=30
226      //   TrackNum [D7], size=1, val=3
227      //   TrackUID [73] [C5], size=1, value=3
228      //   TrackType [83], size=1, val=0x11
229      //   CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
230      const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
231                         "\x83\x81\x11\x86\x92"
232                         "D_WEBVTT/SUBTITLES";
233      const int len = strlen(str);
234      DCHECK_EQ(len, 32);
235      const uint8* const buf = reinterpret_cast<const uint8*>(str);
236      text_track_entry = DecoderBuffer::CopyFrom(buf, len);
237      tracks_element_size += text_track_entry->data_size();
238    }
239
240    *size = ebml_header->data_size() + info->data_size() +
241        kTracksHeaderSize + tracks_element_size;
242
243    buffer->reset(new uint8[*size]);
244
245    uint8* buf = buffer->get();
246    memcpy(buf, ebml_header->data(), ebml_header->data_size());
247    buf += ebml_header->data_size();
248
249    memcpy(buf, info->data(), info->data_size());
250    buf += info->data_size();
251
252    memcpy(buf, kTracksHeader, kTracksHeaderSize);
253    WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
254    buf += kTracksHeaderSize;
255
256    // TODO(xhwang): Simplify this! Probably have test data files that contain
257    // ContentEncodings directly instead of trying to create one at run-time.
258    if (has_audio) {
259      memcpy(buf, audio_track_entry->data(),
260             audio_track_entry->data_size());
261      if (is_audio_encrypted) {
262        memcpy(buf + audio_track_entry->data_size(),
263               audio_content_encodings->data(),
264               audio_content_encodings->data_size());
265        WriteInt64(buf + kAudioTrackSizeOffset,
266                   audio_track_entry->data_size() +
267                   audio_content_encodings->data_size() -
268                   kAudioTrackEntryHeaderSize);
269        buf += audio_content_encodings->data_size();
270      }
271      buf += audio_track_entry->data_size();
272    }
273
274    if (has_video) {
275      memcpy(buf, video_track_entry->data(),
276             video_track_entry->data_size());
277      if (is_video_encrypted) {
278        memcpy(buf + video_track_entry->data_size(),
279               video_content_encodings->data(),
280               video_content_encodings->data_size());
281        WriteInt64(buf + kVideoTrackSizeOffset,
282                   video_track_entry->data_size() +
283                   video_content_encodings->data_size() -
284                   kVideoTrackEntryHeaderSize);
285        buf += video_content_encodings->data_size();
286      }
287      buf += video_track_entry->data_size();
288    }
289
290    if (has_text) {
291      memcpy(buf, text_track_entry->data(),
292             text_track_entry->data_size());
293      buf += text_track_entry->data_size();
294    }
295  }
296
297  ChunkDemuxer::Status AddId() {
298    return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
299  }
300
301  ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
302    bool has_audio = (stream_flags & HAS_AUDIO) != 0;
303    bool has_video = (stream_flags & HAS_VIDEO) != 0;
304    std::vector<std::string> codecs;
305    std::string type;
306
307    if (has_audio) {
308      codecs.push_back("vorbis");
309      type = "audio/webm";
310    }
311
312    if (has_video) {
313      codecs.push_back("vp8");
314      type = "video/webm";
315    }
316
317    if (!has_audio && !has_video) {
318      return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
319    }
320
321    return demuxer_->AddId(source_id, type, codecs,
322                           use_legacy_frame_processor_);
323  }
324
325  ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
326    std::vector<std::string> codecs;
327    std::string type = "video/mp2t";
328    codecs.push_back("mp4a.40.2");
329    codecs.push_back("avc1.640028");
330    return demuxer_->AddId(source_id, type, codecs,
331                           use_legacy_frame_processor_);
332  }
333
334  void AppendData(const uint8* data, size_t length) {
335    AppendData(kSourceId, data, length);
336  }
337
338  void AppendCluster(const std::string& source_id,
339                     scoped_ptr<Cluster> cluster) {
340    AppendData(source_id, cluster->data(), cluster->size());
341  }
342
343  void AppendCluster(scoped_ptr<Cluster> cluster) {
344    AppendCluster(kSourceId, cluster.Pass());
345  }
346
347  void AppendCluster(int timecode, int block_count) {
348    AppendCluster(GenerateCluster(timecode, block_count));
349  }
350
351  void AppendSingleStreamCluster(const std::string& source_id, int track_number,
352                                 int timecode, int block_count) {
353    int block_duration = 0;
354    switch (track_number) {
355      case kVideoTrackNum:
356        block_duration = kVideoBlockDuration;
357        break;
358      case kAudioTrackNum:
359        block_duration = kAudioBlockDuration;
360        break;
361      case kTextTrackNum:
362        block_duration = kTextBlockDuration;
363        break;
364    }
365    ASSERT_NE(block_duration, 0);
366    int end_timecode = timecode + block_count * block_duration;
367    AppendCluster(source_id,
368                  GenerateSingleStreamCluster(
369                      timecode, end_timecode, track_number, block_duration));
370  }
371
372  // |cluster_description| - A space delimited string of buffer info that
373  //  is used to construct a cluster. Each buffer info is a timestamp in
374  //  milliseconds and optionally followed by a 'K' to indicate that a buffer
375  //  should be marked as a keyframe. For example "0K 30 60" should constuct
376  //  a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
377  //  at 30ms and 60ms.
378  void AppendSingleStreamCluster(const std::string& source_id, int track_number,
379                                 const std::string& cluster_description) {
380    std::vector<std::string> timestamps;
381    base::SplitString(cluster_description, ' ', &timestamps);
382
383    ClusterBuilder cb;
384    std::vector<uint8> data(10);
385    for (size_t i = 0; i < timestamps.size(); ++i) {
386      std::string timestamp_str = timestamps[i];
387      int block_flags = 0;
388      if (EndsWith(timestamp_str, "K", true)) {
389        block_flags = kWebMFlagKeyframe;
390        // Remove the "K" off of the token.
391        timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
392      }
393      int timestamp_in_ms;
394      CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
395
396      if (i == 0)
397        cb.SetClusterTimecode(timestamp_in_ms);
398
399      if (track_number == kTextTrackNum) {
400        cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
401                         block_flags, &data[0], data.size());
402      } else {
403        cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
404                          &data[0], data.size());
405      }
406    }
407    AppendCluster(source_id, cb.Finish());
408  }
409
410  void AppendData(const std::string& source_id,
411                  const uint8* data, size_t length) {
412    EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
413
414    // TODO(wolenetz): Test timestamp offset updating once "sequence" append
415    // mode processing is implemented. See http://crbug.com/249422.
416    demuxer_->AppendData(source_id, data, length,
417                         append_window_start_for_next_append_,
418                         append_window_end_for_next_append_,
419                         &timestamp_offset_map_[source_id]);
420  }
421
422  void AppendDataInPieces(const uint8* data, size_t length) {
423    AppendDataInPieces(data, length, 7);
424  }
425
426  void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
427    const uint8* start = data;
428    const uint8* end = data + length;
429    while (start < end) {
430      size_t append_size = std::min(piece_size,
431                                    static_cast<size_t>(end - start));
432      AppendData(start, append_size);
433      start += append_size;
434    }
435  }
436
437  void AppendInitSegment(int stream_flags) {
438    AppendInitSegmentWithSourceId(kSourceId, stream_flags);
439  }
440
441  void AppendInitSegmentWithSourceId(const std::string& source_id,
442                                     int stream_flags) {
443    AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
444  }
445
446  void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
447                                          int stream_flags,
448                                          bool is_audio_encrypted,
449                                          bool is_video_encrypted) {
450    scoped_ptr<uint8[]> info_tracks;
451    int info_tracks_size = 0;
452    CreateInitSegment(stream_flags,
453                      is_audio_encrypted, is_video_encrypted,
454                      &info_tracks, &info_tracks_size);
455    AppendData(source_id, info_tracks.get(), info_tracks_size);
456  }
457
458  void AppendGarbage() {
459    // Fill up an array with gibberish.
460    int garbage_cluster_size = 10;
461    scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
462    for (int i = 0; i < garbage_cluster_size; ++i)
463      garbage_cluster[i] = i;
464    AppendData(garbage_cluster.get(), garbage_cluster_size);
465  }
466
467  void InitDoneCalled(PipelineStatus expected_status,
468                      PipelineStatus status) {
469    EXPECT_EQ(status, expected_status);
470  }
471
472  void AppendEmptyCluster(int timecode) {
473    AppendCluster(GenerateEmptyCluster(timecode));
474  }
475
476  PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
477                                    PipelineStatus expected_status) {
478    if (expected_duration != kNoTimestamp())
479      EXPECT_CALL(host_, SetDuration(expected_duration));
480    return CreateInitDoneCB(expected_status);
481  }
482
483  PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
484    return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
485                      base::Unretained(this),
486                      expected_status);
487  }
488
489  enum StreamFlags {
490    HAS_AUDIO = 1 << 0,
491    HAS_VIDEO = 1 << 1,
492    HAS_TEXT = 1 << 2
493  };
494
495  bool InitDemuxer(int stream_flags) {
496    return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
497  }
498
499  bool InitDemuxerWithEncryptionInfo(
500      int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
501
502    PipelineStatus expected_status =
503        (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
504
505    base::TimeDelta expected_duration = kNoTimestamp();
506    if (expected_status == PIPELINE_OK)
507      expected_duration = kDefaultDuration();
508
509    EXPECT_CALL(*this, DemuxerOpened());
510    demuxer_->Initialize(
511        &host_, CreateInitDoneCB(expected_duration, expected_status), true);
512
513    if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
514      return false;
515
516    AppendInitSegmentWithEncryptedInfo(
517        kSourceId, stream_flags,
518        is_audio_encrypted, is_video_encrypted);
519    return true;
520  }
521
522  bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
523                                           const std::string& video_id,
524                                           bool has_text) {
525    EXPECT_CALL(*this, DemuxerOpened());
526    demuxer_->Initialize(
527        &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
528
529    if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
530      return false;
531    if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
532      return false;
533
534    int audio_flags = HAS_AUDIO;
535    int video_flags = HAS_VIDEO;
536
537    if (has_text) {
538      audio_flags |= HAS_TEXT;
539      video_flags |= HAS_TEXT;
540    }
541
542    AppendInitSegmentWithSourceId(audio_id, audio_flags);
543    AppendInitSegmentWithSourceId(video_id, video_flags);
544    return true;
545  }
546
547  bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
548                                       const std::string& video_id) {
549    return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
550  }
551
552  // Initializes the demuxer with data from 2 files with different
553  // decoder configurations. This is used to test the decoder config change
554  // logic.
555  //
556  // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
557  // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
558  // The resulting video stream returns data from each file for the following
559  // time ranges.
560  // bear-320x240.webm : [0-501)       [801-2736)
561  // bear-640x360.webm :       [527-793)
562  //
563  // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
564  // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
565  // The resulting audio stream returns data from each file for the following
566  // time ranges.
567  // bear-320x240.webm : [0-524)       [779-2736)
568  // bear-640x360.webm :       [527-759)
569  bool InitDemuxerWithConfigChangeData() {
570    scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
571    scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
572
573    EXPECT_CALL(*this, DemuxerOpened());
574
575    demuxer_->Initialize(
576        &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
577                                 PIPELINE_OK), true);
578
579    if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
580      return false;
581
582    // Append the whole bear1 file.
583    // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
584    // the files are fixed to have the correct duration in their init segments,
585    // and the CreateInitDoneCB() call, above, is fixed to used that duration.
586    // See http://crbug.com/354284.
587    EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
588    AppendData(bear1->data(), bear1->data_size());
589    // Last audio frame has timestamp 2721 and duration 24 (estimated from max
590    // seen so far for audio track).
591    // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
592    // DefaultDuration for video track).
593    CheckExpectedRanges(kSourceId, "{ [0,2736) }");
594
595    // Append initialization segment for bear2.
596    // Note: Offsets here and below are derived from
597    // media/test/data/bear-640x360-manifest.js and
598    // media/test/data/bear-320x240-manifest.js which were
599    // generated from media/test/data/bear-640x360.webm and
600    // media/test/data/bear-320x240.webm respectively.
601    AppendData(bear2->data(), 4340);
602
603    // Append a media segment that goes from [0.527000, 1.014000).
604    AppendData(bear2->data() + 55290, 18785);
605    CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
606
607    // Append initialization segment for bear1 & fill gap with [779-1197)
608    // segment.
609    AppendData(bear1->data(), 4370);
610    AppendData(bear1->data() + 72737, 28183);
611    CheckExpectedRanges(kSourceId, "{ [0,2736) }");
612
613    MarkEndOfStream(PIPELINE_OK);
614    return true;
615  }
616
617  void ShutdownDemuxer() {
618    if (demuxer_) {
619      demuxer_->Shutdown();
620      message_loop_.RunUntilIdle();
621    }
622  }
623
624  void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
625    uint8 data[] = { 0x00 };
626    cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
627  }
628
629  scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
630    return GenerateCluster(timecode, timecode, block_count);
631  }
632
633  void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
634                          int duration, int flags) {
635    const uint8* data =
636        (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
637    int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
638        sizeof(kVP8Interframe);
639    cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
640  }
641
642  scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
643                                      int first_video_timecode,
644                                      int block_count) {
645    CHECK_GT(block_count, 0);
646
647    int size = 10;
648    scoped_ptr<uint8[]> data(new uint8[size]);
649
650    ClusterBuilder cb;
651    cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
652
653    if (block_count == 1) {
654      cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
655                       kAudioBlockDuration, kWebMFlagKeyframe,
656                       data.get(), size);
657      return cb.Finish();
658    }
659
660    int audio_timecode = first_audio_timecode;
661    int video_timecode = first_video_timecode;
662
663    // Create simple blocks for everything except the last 2 blocks.
664    // The first video frame must be a keyframe.
665    uint8 video_flag = kWebMFlagKeyframe;
666    for (int i = 0; i < block_count - 2; i++) {
667      if (audio_timecode <= video_timecode) {
668        cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
669                          data.get(), size);
670        audio_timecode += kAudioBlockDuration;
671        continue;
672      }
673
674      cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
675                        size);
676      video_timecode += kVideoBlockDuration;
677      video_flag = 0;
678    }
679
680    // Make the last 2 blocks BlockGroups so that they don't get delayed by the
681    // block duration calculation logic.
682    if (audio_timecode <= video_timecode) {
683      cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
684                       kWebMFlagKeyframe, data.get(), size);
685      AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
686                         kVideoBlockDuration, video_flag);
687    } else {
688      AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
689                         kVideoBlockDuration, video_flag);
690      cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
691                       kWebMFlagKeyframe, data.get(), size);
692    }
693
694    return cb.Finish();
695  }
696
697  scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
698                                                  int end_timecode,
699                                                  int track_number,
700                                                  int block_duration) {
701    CHECK_GT(end_timecode, timecode);
702
703    std::vector<uint8> data(kBlockSize);
704
705    ClusterBuilder cb;
706    cb.SetClusterTimecode(timecode);
707
708    // Create simple blocks for everything except the last block.
709    while (timecode < (end_timecode - block_duration)) {
710      cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
711                        &data[0], data.size());
712      timecode += block_duration;
713    }
714
715    if (track_number == kVideoTrackNum) {
716      AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
717                         kWebMFlagKeyframe);
718    } else {
719      cb.AddBlockGroup(track_number, timecode, block_duration,
720                       kWebMFlagKeyframe, &data[0], data.size());
721    }
722
723    return cb.Finish();
724  }
725
726  void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
727    demuxer_->GetStream(type)->Read(read_cb);
728    message_loop_.RunUntilIdle();
729  }
730
731  void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
732    Read(DemuxerStream::AUDIO, read_cb);
733  }
734
735  void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
736    Read(DemuxerStream::VIDEO, read_cb);
737  }
738
739  void GenerateExpectedReads(int timecode, int block_count) {
740    GenerateExpectedReads(timecode, timecode, block_count);
741  }
742
743  void GenerateExpectedReads(int start_audio_timecode,
744                             int start_video_timecode,
745                             int block_count) {
746    CHECK_GT(block_count, 0);
747
748    if (block_count == 1) {
749      ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
750      return;
751    }
752
753    int audio_timecode = start_audio_timecode;
754    int video_timecode = start_video_timecode;
755
756    for (int i = 0; i < block_count; i++) {
757      if (audio_timecode <= video_timecode) {
758        ExpectRead(DemuxerStream::AUDIO, audio_timecode);
759        audio_timecode += kAudioBlockDuration;
760        continue;
761      }
762
763      ExpectRead(DemuxerStream::VIDEO, video_timecode);
764      video_timecode += kVideoBlockDuration;
765    }
766  }
767
768  void GenerateSingleStreamExpectedReads(int timecode,
769                                         int block_count,
770                                         DemuxerStream::Type type,
771                                         int block_duration) {
772    CHECK_GT(block_count, 0);
773    int stream_timecode = timecode;
774
775    for (int i = 0; i < block_count; i++) {
776      ExpectRead(type, stream_timecode);
777      stream_timecode += block_duration;
778    }
779  }
780
781  void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
782    GenerateSingleStreamExpectedReads(
783        timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
784  }
785
786  void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
787    GenerateSingleStreamExpectedReads(
788        timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
789  }
790
791  scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
792    ClusterBuilder cb;
793    cb.SetClusterTimecode(timecode);
794    return cb.Finish();
795  }
796
797  void CheckExpectedRanges(const std::string& expected) {
798    CheckExpectedRanges(kSourceId, expected);
799  }
800
801  void CheckExpectedRanges(const std::string&  id,
802                           const std::string& expected) {
803    Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
804
805    std::stringstream ss;
806    ss << "{ ";
807    for (size_t i = 0; i < r.size(); ++i) {
808      ss << "[" << r.start(i).InMilliseconds() << ","
809         << r.end(i).InMilliseconds() << ") ";
810    }
811    ss << "}";
812    EXPECT_EQ(expected, ss.str());
813  }
814
815  MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
816                              const scoped_refptr<DecoderBuffer>&));
817
818  void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
819                            scoped_refptr<DecoderBuffer>* buffer_out,
820                            DemuxerStream::Status status,
821                            const scoped_refptr<DecoderBuffer>& buffer) {
822    *status_out = status;
823    *buffer_out = buffer;
824  }
825
826  void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
827                                   DemuxerStream::Status* status,
828                                   base::TimeDelta* last_timestamp) {
829    DemuxerStream* stream = demuxer_->GetStream(type);
830    scoped_refptr<DecoderBuffer> buffer;
831
832    *last_timestamp = kNoTimestamp();
833    do {
834      stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
835                              base::Unretained(this), status, &buffer));
836      base::MessageLoop::current()->RunUntilIdle();
837      if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
838        *last_timestamp = buffer->timestamp();
839    } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
840  }
841
842  void ExpectEndOfStream(DemuxerStream::Type type) {
843    EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
844    demuxer_->GetStream(type)->Read(base::Bind(
845        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
846    message_loop_.RunUntilIdle();
847  }
848
849  void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
850    EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
851                                HasTimestamp(timestamp_in_ms)));
852    demuxer_->GetStream(type)->Read(base::Bind(
853        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
854    message_loop_.RunUntilIdle();
855  }
856
857  void ExpectConfigChanged(DemuxerStream::Type type) {
858    EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
859    demuxer_->GetStream(type)->Read(base::Bind(
860        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
861    message_loop_.RunUntilIdle();
862  }
863
864  void CheckExpectedBuffers(DemuxerStream* stream,
865                            const std::string& expected) {
866    std::vector<std::string> timestamps;
867    base::SplitString(expected, ' ', &timestamps);
868    std::stringstream ss;
869    for (size_t i = 0; i < timestamps.size(); ++i) {
870      DemuxerStream::Status status;
871      scoped_refptr<DecoderBuffer> buffer;
872      stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
873                              base::Unretained(this), &status, &buffer));
874      base::MessageLoop::current()->RunUntilIdle();
875      if (status != DemuxerStream::kOk || buffer->end_of_stream())
876        break;
877
878      if (i > 0)
879        ss << " ";
880      ss << buffer->timestamp().InMilliseconds();
881    }
882    EXPECT_EQ(expected, ss.str());
883  }
884
885  MOCK_METHOD1(Checkpoint, void(int id));
886
887  struct BufferTimestamps {
888    int video_time_ms;
889    int audio_time_ms;
890  };
891  static const int kSkip = -1;
892
893  // Test parsing a WebM file.
894  // |filename| - The name of the file in media/test/data to parse.
895  // |timestamps| - The expected timestamps on the parsed buffers.
896  //    a timestamp of kSkip indicates that a Read() call for that stream
897  //    shouldn't be made on that iteration of the loop. If both streams have
898  //    a kSkip then the loop will terminate.
899  bool ParseWebMFile(const std::string& filename,
900                     const BufferTimestamps* timestamps,
901                     const base::TimeDelta& duration) {
902    return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
903  }
904
905  bool ParseWebMFile(const std::string& filename,
906                     const BufferTimestamps* timestamps,
907                     const base::TimeDelta& duration,
908                     int stream_flags) {
909    EXPECT_CALL(*this, DemuxerOpened());
910    demuxer_->Initialize(
911        &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
912
913    if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
914      return false;
915
916    // Read a WebM file into memory and send the data to the demuxer.
917    scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
918    AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
919
920    // Verify that the timestamps on the first few packets match what we
921    // expect.
922    for (size_t i = 0;
923         (timestamps[i].audio_time_ms != kSkip ||
924          timestamps[i].video_time_ms != kSkip);
925         i++) {
926      bool audio_read_done = false;
927      bool video_read_done = false;
928
929      if (timestamps[i].audio_time_ms != kSkip) {
930        ReadAudio(base::Bind(&OnReadDone,
931                             base::TimeDelta::FromMilliseconds(
932                                 timestamps[i].audio_time_ms),
933                             &audio_read_done));
934        EXPECT_TRUE(audio_read_done);
935      }
936
937      if (timestamps[i].video_time_ms != kSkip) {
938        ReadVideo(base::Bind(&OnReadDone,
939                             base::TimeDelta::FromMilliseconds(
940                                 timestamps[i].video_time_ms),
941                             &video_read_done));
942        EXPECT_TRUE(video_read_done);
943      }
944    }
945
946    return true;
947  }
948
949  MOCK_METHOD0(DemuxerOpened, void());
950  // TODO(xhwang): This is a workaround of the issue that move-only parameters
951  // are not supported in mocked methods. Remove this when the issue is fixed
952  // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
953  // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
954  MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
955                                 const uint8* init_data, int init_data_size));
956  void DemuxerNeedKey(const std::string& type,
957                      const std::vector<uint8>& init_data) {
958    const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
959    NeedKeyMock(type, init_data_ptr, init_data.size());
960  }
961
962  void Seek(base::TimeDelta seek_time) {
963    demuxer_->StartWaitingForSeek(seek_time);
964    demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
965    message_loop_.RunUntilIdle();
966  }
967
968  void MarkEndOfStream(PipelineStatus status) {
969    demuxer_->MarkEndOfStream(status);
970    message_loop_.RunUntilIdle();
971  }
972
973  bool SetTimestampOffset(const std::string& id,
974                          base::TimeDelta timestamp_offset) {
975    if (demuxer_->IsParsingMediaSegment(id))
976      return false;
977
978    timestamp_offset_map_[id] = timestamp_offset;
979    return true;
980  }
981
982  base::MessageLoop message_loop_;
983  MockDemuxerHost host_;
984
985  scoped_ptr<ChunkDemuxer> demuxer_;
986  bool use_legacy_frame_processor_;
987
988  base::TimeDelta append_window_start_for_next_append_;
989  base::TimeDelta append_window_end_for_next_append_;
990
991  // Map of source id to timestamp offset to use for the next AppendData()
992  // operation for that source id.
993  std::map<std::string, base::TimeDelta> timestamp_offset_map_;
994
995 private:
996  DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
997};
998
999TEST_P(ChunkDemuxerTest, Init) {
1000  // Test no streams, audio-only, video-only, and audio & video scenarios.
1001  // Audio and video streams can be encrypted or not encrypted.
1002  for (int i = 0; i < 16; i++) {
1003    bool has_audio = (i & 0x1) != 0;
1004    bool has_video = (i & 0x2) != 0;
1005    bool is_audio_encrypted = (i & 0x4) != 0;
1006    bool is_video_encrypted = (i & 0x8) != 0;
1007
1008    // No test on invalid combination.
1009    if ((!has_audio && is_audio_encrypted) ||
1010        (!has_video && is_video_encrypted)) {
1011      continue;
1012    }
1013
1014    CreateNewDemuxer();
1015
1016    if (is_audio_encrypted || is_video_encrypted) {
1017      int need_key_count = (is_audio_encrypted ? 1 : 0) +
1018                           (is_video_encrypted ? 1 : 0);
1019      EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1020                                     DecryptConfig::kDecryptionKeySize))
1021          .Times(Exactly(need_key_count));
1022    }
1023
1024    int stream_flags = 0;
1025    if (has_audio)
1026      stream_flags |= HAS_AUDIO;
1027
1028    if (has_video)
1029      stream_flags |= HAS_VIDEO;
1030
1031    ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1032        stream_flags, is_audio_encrypted, is_video_encrypted));
1033
1034    DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1035    if (has_audio) {
1036      ASSERT_TRUE(audio_stream);
1037
1038      const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1039      EXPECT_EQ(kCodecVorbis, config.codec());
1040      EXPECT_EQ(32, config.bits_per_channel());
1041      EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1042      EXPECT_EQ(44100, config.samples_per_second());
1043      EXPECT_TRUE(config.extra_data());
1044      EXPECT_GT(config.extra_data_size(), 0u);
1045      EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1046      EXPECT_EQ(is_audio_encrypted,
1047                audio_stream->audio_decoder_config().is_encrypted());
1048      EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1049                      ->supports_partial_append_window_trimming());
1050    } else {
1051      EXPECT_FALSE(audio_stream);
1052    }
1053
1054    DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1055    if (has_video) {
1056      EXPECT_TRUE(video_stream);
1057      EXPECT_EQ(is_video_encrypted,
1058                video_stream->video_decoder_config().is_encrypted());
1059      EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1060                       ->supports_partial_append_window_trimming());
1061    } else {
1062      EXPECT_FALSE(video_stream);
1063    }
1064
1065    ShutdownDemuxer();
1066    demuxer_.reset();
1067  }
1068}
1069
1070// TODO(acolwell): Fold this test into Init tests since the tests are
1071// almost identical.
1072TEST_P(ChunkDemuxerTest, InitText) {
1073  // Test with 1 video stream and 1 text streams, and 0 or 1 audio streams.
1074  // No encryption cases handled here.
1075  bool has_video = true;
1076  bool is_audio_encrypted = false;
1077  bool is_video_encrypted = false;
1078  for (int i = 0; i < 2; i++) {
1079    bool has_audio = (i & 0x1) != 0;
1080
1081    CreateNewDemuxer();
1082
1083    DemuxerStream* text_stream = NULL;
1084    TextTrackConfig text_config;
1085    EXPECT_CALL(host_, AddTextStream(_, _))
1086        .WillOnce(DoAll(SaveArg<0>(&text_stream),
1087                        SaveArg<1>(&text_config)));
1088
1089    int stream_flags = HAS_TEXT;
1090    if (has_audio)
1091      stream_flags |= HAS_AUDIO;
1092
1093    if (has_video)
1094      stream_flags |= HAS_VIDEO;
1095
1096    ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1097        stream_flags, is_audio_encrypted, is_video_encrypted));
1098    ASSERT_TRUE(text_stream);
1099    EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1100    EXPECT_EQ(kTextSubtitles, text_config.kind());
1101    EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1102                     ->supports_partial_append_window_trimming());
1103
1104    DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1105    if (has_audio) {
1106      ASSERT_TRUE(audio_stream);
1107
1108      const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1109      EXPECT_EQ(kCodecVorbis, config.codec());
1110      EXPECT_EQ(32, config.bits_per_channel());
1111      EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1112      EXPECT_EQ(44100, config.samples_per_second());
1113      EXPECT_TRUE(config.extra_data());
1114      EXPECT_GT(config.extra_data_size(), 0u);
1115      EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1116      EXPECT_EQ(is_audio_encrypted,
1117                audio_stream->audio_decoder_config().is_encrypted());
1118      EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1119                      ->supports_partial_append_window_trimming());
1120    } else {
1121      EXPECT_FALSE(audio_stream);
1122    }
1123
1124    DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1125    if (has_video) {
1126      EXPECT_TRUE(video_stream);
1127      EXPECT_EQ(is_video_encrypted,
1128                video_stream->video_decoder_config().is_encrypted());
1129      EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1130                       ->supports_partial_append_window_trimming());
1131    } else {
1132      EXPECT_FALSE(video_stream);
1133    }
1134
1135    ShutdownDemuxer();
1136    demuxer_.reset();
1137  }
1138}
1139
1140// Make sure that the demuxer reports an error if Shutdown()
1141// is called before all the initialization segments are appended.
1142TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1143  EXPECT_CALL(*this, DemuxerOpened());
1144  demuxer_->Initialize(
1145      &host_, CreateInitDoneCB(
1146          kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1147
1148  EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1149  EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1150
1151  AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1152
1153  ShutdownDemuxer();
1154}
1155
1156TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1157  EXPECT_CALL(*this, DemuxerOpened());
1158  demuxer_->Initialize(
1159      &host_, CreateInitDoneCB(
1160          kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1161
1162  EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1163  EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1164
1165  EXPECT_CALL(host_, AddTextStream(_, _))
1166      .Times(Exactly(1));
1167
1168  AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1169
1170  ShutdownDemuxer();
1171}
1172
1173// Verifies that all streams waiting for data receive an end of stream
1174// buffer when Shutdown() is called.
1175TEST_P(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1176  DemuxerStream* text_stream = NULL;
1177  EXPECT_CALL(host_, AddTextStream(_, _))
1178      .WillOnce(SaveArg<0>(&text_stream));
1179  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1180
1181  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1182  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1183
1184  bool audio_read_done = false;
1185  bool video_read_done = false;
1186  bool text_read_done = false;
1187  audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1188  video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1189  text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1190  message_loop_.RunUntilIdle();
1191
1192  EXPECT_FALSE(audio_read_done);
1193  EXPECT_FALSE(video_read_done);
1194  EXPECT_FALSE(text_read_done);
1195
1196  ShutdownDemuxer();
1197
1198  EXPECT_TRUE(audio_read_done);
1199  EXPECT_TRUE(video_read_done);
1200  EXPECT_TRUE(text_read_done);
1201}
1202
1203// Test that Seek() completes successfully when the first cluster
1204// arrives.
1205TEST_P(ChunkDemuxerTest, AppendDataAfterSeek) {
1206  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1207  AppendCluster(kDefaultFirstCluster());
1208
1209  InSequence s;
1210
1211  EXPECT_CALL(*this, Checkpoint(1));
1212
1213  Seek(base::TimeDelta::FromMilliseconds(46));
1214
1215  EXPECT_CALL(*this, Checkpoint(2));
1216
1217  Checkpoint(1);
1218
1219  AppendCluster(kDefaultSecondCluster());
1220
1221  message_loop_.RunUntilIdle();
1222
1223  Checkpoint(2);
1224}
1225
1226// Test that parsing errors are handled for clusters appended after init.
1227TEST_P(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1228  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1229  AppendCluster(kDefaultFirstCluster());
1230
1231  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1232  AppendGarbage();
1233}
1234
1235// Test the case where a Seek() is requested while the parser
1236// is in the middle of cluster. This is to verify that the parser
1237// does not reset itself on a seek.
1238TEST_P(ChunkDemuxerTest, SeekWhileParsingCluster) {
1239  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1240
1241  InSequence s;
1242
1243  scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1244
1245  // Split the cluster into two appends at an arbitrary point near the end.
1246  int first_append_size = cluster_a->size() - 11;
1247  int second_append_size = cluster_a->size() - first_append_size;
1248
1249  // Append the first part of the cluster.
1250  AppendData(cluster_a->data(), first_append_size);
1251
1252  ExpectRead(DemuxerStream::AUDIO, 0);
1253  ExpectRead(DemuxerStream::VIDEO, 0);
1254  ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1255
1256  Seek(base::TimeDelta::FromSeconds(5));
1257
1258  // Append the rest of the cluster.
1259  AppendData(cluster_a->data() + first_append_size, second_append_size);
1260
1261  // Append the new cluster and verify that only the blocks
1262  // in the new cluster are returned.
1263  AppendCluster(GenerateCluster(5000, 6));
1264  GenerateExpectedReads(5000, 6);
1265}
1266
1267// Test the case where AppendData() is called before Init().
1268TEST_P(ChunkDemuxerTest, AppendDataBeforeInit) {
1269  scoped_ptr<uint8[]> info_tracks;
1270  int info_tracks_size = 0;
1271  CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1272                    false, false, &info_tracks, &info_tracks_size);
1273  demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1274                       append_window_start_for_next_append_,
1275                       append_window_end_for_next_append_,
1276                       &timestamp_offset_map_[kSourceId]);
1277}
1278
1279// Make sure Read() callbacks are dispatched with the proper data.
1280TEST_P(ChunkDemuxerTest, Read) {
1281  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1282
1283  AppendCluster(kDefaultFirstCluster());
1284
1285  bool audio_read_done = false;
1286  bool video_read_done = false;
1287  ReadAudio(base::Bind(&OnReadDone,
1288                       base::TimeDelta::FromMilliseconds(0),
1289                       &audio_read_done));
1290  ReadVideo(base::Bind(&OnReadDone,
1291                       base::TimeDelta::FromMilliseconds(0),
1292                       &video_read_done));
1293
1294  EXPECT_TRUE(audio_read_done);
1295  EXPECT_TRUE(video_read_done);
1296}
1297
1298TEST_P(ChunkDemuxerTest, OutOfOrderClusters) {
1299  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1300  AppendCluster(kDefaultFirstCluster());
1301  AppendCluster(GenerateCluster(10, 4));
1302
1303  // Make sure that AppendCluster() does not fail with a cluster that has
1304  // overlaps with the previously appended cluster.
1305  AppendCluster(GenerateCluster(5, 4));
1306
1307  // Verify that AppendData() can still accept more data.
1308  scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1309  demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1310                       append_window_start_for_next_append_,
1311                       append_window_end_for_next_append_,
1312                       &timestamp_offset_map_[kSourceId]);
1313}
1314
1315TEST_P(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1316  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1317  AppendCluster(kDefaultFirstCluster());
1318
1319  ClusterBuilder cb;
1320
1321  // Test the case where block timecodes are not monotonically
1322  // increasing but stay above the cluster timecode.
1323  cb.SetClusterTimecode(5);
1324  AddSimpleBlock(&cb, kAudioTrackNum, 5);
1325  AddSimpleBlock(&cb, kVideoTrackNum, 10);
1326  AddSimpleBlock(&cb, kAudioTrackNum, 7);
1327  AddSimpleBlock(&cb, kVideoTrackNum, 15);
1328
1329  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1330  AppendCluster(cb.Finish());
1331
1332  // Verify that AppendData() ignores data after the error.
1333  scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1334  demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1335                       append_window_start_for_next_append_,
1336                       append_window_end_for_next_append_,
1337                       &timestamp_offset_map_[kSourceId]);
1338}
1339
1340TEST_P(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1341  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1342  AppendCluster(kDefaultFirstCluster());
1343
1344  ClusterBuilder cb;
1345
1346  // Test timecodes going backwards and including values less than the cluster
1347  // timecode.
1348  cb.SetClusterTimecode(5);
1349  AddSimpleBlock(&cb, kAudioTrackNum, 5);
1350  AddSimpleBlock(&cb, kVideoTrackNum, 5);
1351  AddSimpleBlock(&cb, kAudioTrackNum, 3);
1352  AddSimpleBlock(&cb, kVideoTrackNum, 3);
1353
1354  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1355  AppendCluster(cb.Finish());
1356
1357  // Verify that AppendData() ignores data after the error.
1358  scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1359  demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1360                       append_window_start_for_next_append_,
1361                       append_window_end_for_next_append_,
1362                       &timestamp_offset_map_[kSourceId]);
1363}
1364
1365
1366TEST_P(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1367  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1368  AppendCluster(kDefaultFirstCluster());
1369
1370  ClusterBuilder cb;
1371
1372  // Test monotonic increasing timestamps on a per stream
1373  // basis.
1374  cb.SetClusterTimecode(5);
1375  AddSimpleBlock(&cb, kAudioTrackNum, 5);
1376  AddSimpleBlock(&cb, kVideoTrackNum, 5);
1377  AddSimpleBlock(&cb, kAudioTrackNum, 4);
1378  AddSimpleBlock(&cb, kVideoTrackNum, 7);
1379
1380  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1381  AppendCluster(cb.Finish());
1382}
1383
1384// Test the case where a cluster is passed to AppendCluster() before
1385// INFO & TRACKS data.
1386TEST_P(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1387  EXPECT_CALL(*this, DemuxerOpened());
1388  demuxer_->Initialize(
1389      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1390
1391  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1392
1393  AppendCluster(GenerateCluster(0, 1));
1394}
1395
1396// Test cases where we get an MarkEndOfStream() call during initialization.
1397TEST_P(ChunkDemuxerTest, EOSDuringInit) {
1398  EXPECT_CALL(*this, DemuxerOpened());
1399  demuxer_->Initialize(
1400      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1401  MarkEndOfStream(PIPELINE_OK);
1402}
1403
1404TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1405  EXPECT_CALL(*this, DemuxerOpened());
1406  demuxer_->Initialize(
1407      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1408
1409  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1410
1411  CheckExpectedRanges("{ }");
1412  MarkEndOfStream(PIPELINE_OK);
1413  ShutdownDemuxer();
1414  CheckExpectedRanges("{ }");
1415  demuxer_->RemoveId(kSourceId);
1416  demuxer_.reset();
1417}
1418
1419TEST_P(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1420  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1421
1422  CheckExpectedRanges("{ }");
1423  MarkEndOfStream(PIPELINE_OK);
1424  CheckExpectedRanges("{ }");
1425}
1426
1427TEST_P(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1428  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1429
1430  AppendCluster(kDefaultFirstCluster());
1431  CheckExpectedRanges(kDefaultFirstClusterRange);
1432
1433  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1434  MarkEndOfStream(PIPELINE_ERROR_DECODE);
1435  CheckExpectedRanges(kDefaultFirstClusterRange);
1436}
1437
1438TEST_P(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1439  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1440
1441  AppendCluster(kDefaultFirstCluster());
1442  CheckExpectedRanges(kDefaultFirstClusterRange);
1443
1444  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1445  MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1446}
1447
1448// Helper class to reduce duplicate code when testing end of stream
1449// Read() behavior.
1450class EndOfStreamHelper {
1451 public:
1452  explicit EndOfStreamHelper(Demuxer* demuxer)
1453      : demuxer_(demuxer),
1454        audio_read_done_(false),
1455        video_read_done_(false) {
1456  }
1457
1458  // Request a read on the audio and video streams.
1459  void RequestReads() {
1460    EXPECT_FALSE(audio_read_done_);
1461    EXPECT_FALSE(video_read_done_);
1462
1463    DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1464    DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1465
1466    audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1467    video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1468    base::MessageLoop::current()->RunUntilIdle();
1469  }
1470
1471  // Check to see if |audio_read_done_| and |video_read_done_| variables
1472  // match |expected|.
1473  void CheckIfReadDonesWereCalled(bool expected) {
1474    base::MessageLoop::current()->RunUntilIdle();
1475    EXPECT_EQ(expected, audio_read_done_);
1476    EXPECT_EQ(expected, video_read_done_);
1477  }
1478
1479 private:
1480  static void OnEndOfStreamReadDone(
1481      bool* called,
1482      DemuxerStream::Status status,
1483      const scoped_refptr<DecoderBuffer>& buffer) {
1484    EXPECT_EQ(status, DemuxerStream::kOk);
1485    EXPECT_TRUE(buffer->end_of_stream());
1486    *called = true;
1487  }
1488
1489  Demuxer* demuxer_;
1490  bool audio_read_done_;
1491  bool video_read_done_;
1492
1493  DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1494};
1495
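// Illustrative usage of EndOfStreamHelper (not part of the original tests; it
// simply mirrors the pattern exercised below): request reads on both streams,
// mark end of stream, then verify the pending reads complete with
// end-of-stream buffers.
//   EndOfStreamHelper helper(demuxer_.get());
//   helper.RequestReads();                    // Reads remain pending.
//   MarkEndOfStream(PIPELINE_OK);
//   helper.CheckIfReadDonesWereCalled(true);  // Fulfilled with EOS buffers.
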
1496// Make sure that all pending reads for which we don't have media data get an
1497// "end of stream" buffer when MarkEndOfStream() is called.
1498TEST_P(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1499  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1500
1501  AppendCluster(GenerateCluster(0, 2));
1502
1503  bool audio_read_done_1 = false;
1504  bool video_read_done_1 = false;
1505  EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1506  EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1507
1508  ReadAudio(base::Bind(&OnReadDone,
1509                       base::TimeDelta::FromMilliseconds(0),
1510                       &audio_read_done_1));
1511  ReadVideo(base::Bind(&OnReadDone,
1512                       base::TimeDelta::FromMilliseconds(0),
1513                       &video_read_done_1));
1514  message_loop_.RunUntilIdle();
1515
1516  EXPECT_TRUE(audio_read_done_1);
1517  EXPECT_TRUE(video_read_done_1);
1518
1519  end_of_stream_helper_1.RequestReads();
1520
1521  EXPECT_CALL(host_, SetDuration(
1522      base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1523  MarkEndOfStream(PIPELINE_OK);
1524
1525  end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1526
1527  end_of_stream_helper_2.RequestReads();
1528  end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1529}
1530
1531// Make sure that all Read() calls after we get a MarkEndOfStream()
1532// call return an "end of stream" buffer.
1533TEST_P(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1534  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1535
1536  AppendCluster(GenerateCluster(0, 2));
1537
1538  bool audio_read_done_1 = false;
1539  bool video_read_done_1 = false;
1540  EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1541  EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1542  EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1543
1544  ReadAudio(base::Bind(&OnReadDone,
1545                       base::TimeDelta::FromMilliseconds(0),
1546                       &audio_read_done_1));
1547  ReadVideo(base::Bind(&OnReadDone,
1548                       base::TimeDelta::FromMilliseconds(0),
1549                       &video_read_done_1));
1550
1551  end_of_stream_helper_1.RequestReads();
1552
1553  EXPECT_TRUE(audio_read_done_1);
1554  EXPECT_TRUE(video_read_done_1);
1555  end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1556
1557  EXPECT_CALL(host_, SetDuration(
1558      base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1559  MarkEndOfStream(PIPELINE_OK);
1560
1561  end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1562
1563  // Request a few more reads and make sure we immediately get
1564  // end of stream buffers.
1565  end_of_stream_helper_2.RequestReads();
1566  end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1567
1568  end_of_stream_helper_3.RequestReads();
1569  end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1570}
1571
1572TEST_P(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1573  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1574
1575  AppendCluster(0, 10);
1576  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1577  MarkEndOfStream(PIPELINE_OK);
1578
1579  // Start the first seek.
1580  Seek(base::TimeDelta::FromMilliseconds(20));
1581
1582  // Simulate another seek being requested before the first
1583  // seek has finished prerolling.
1584  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1585  demuxer_->CancelPendingSeek(seek_time2);
1586
1587  // Finish second seek.
1588  Seek(seek_time2);
1589
1590  DemuxerStream::Status status;
1591  base::TimeDelta last_timestamp;
1592
1593  // Make sure audio can reach end of stream.
1594  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1595  ASSERT_EQ(status, DemuxerStream::kOk);
1596
1597  // Make sure video can reach end of stream.
1598  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1599  ASSERT_EQ(status, DemuxerStream::kOk);
1600}
1601
1602// Verify buffered range change behavior for audio/video/text tracks.
1603TEST_P(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1604  DemuxerStream* text_stream = NULL;
1605
1606  EXPECT_CALL(host_, AddTextStream(_, _))
1607      .WillOnce(SaveArg<0>(&text_stream));
1608  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1609
1610  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
1611  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1612
1613  // Check expected ranges and verify that an empty text track does not
1614  // affect the expected ranges.
1615  CheckExpectedRanges(kSourceId, "{ [0,46) }");
1616
1617  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1618  MarkEndOfStream(PIPELINE_OK);
1619
1620  // Check expected ranges and verify that an empty text track does not
1621  // affect the expected ranges.
1622  CheckExpectedRanges(kSourceId, "{ [0,66) }");
1623
1624  // Unmark end of stream state and verify that the ranges return to
1625  // their pre-"end of stream" values.
1626  demuxer_->UnmarkEndOfStream();
1627  CheckExpectedRanges(kSourceId, "{ [0,46) }");
1628
1629  // Add text track data and verify that the buffered ranges don't change
1630  // since the intersection of all the tracks doesn't change.
1631  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1632  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
1633  CheckExpectedRanges(kSourceId, "{ [0,46) }");
1634
1635  // Mark end of stream and verify that text track data is reflected in
1636  // the new range.
1637  MarkEndOfStream(PIPELINE_OK);
1638  CheckExpectedRanges(kSourceId, "{ [0,200) }");
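  // (The last text cue starts at 100 ms and kTextBlockDuration is 100 ms, so
  // marking end of stream extends the buffered range out to 200 ms.)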
1639}
1640
1641// Make sure AppendData() will accept elements that span multiple calls.
1642TEST_P(ChunkDemuxerTest, AppendingInPieces) {
1643  EXPECT_CALL(*this, DemuxerOpened());
1644  demuxer_->Initialize(
1645      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1646
1647  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1648
1649  scoped_ptr<uint8[]> info_tracks;
1650  int info_tracks_size = 0;
1651  CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1652                    false, false, &info_tracks, &info_tracks_size);
1653
1654  scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1655  scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1656
1657  size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1658  scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1659  uint8* dst = buffer.get();
1660  memcpy(dst, info_tracks.get(), info_tracks_size);
1661  dst += info_tracks_size;
1662
1663  memcpy(dst, cluster_a->data(), cluster_a->size());
1664  dst += cluster_a->size();
1665
1666  memcpy(dst, cluster_b->data(), cluster_b->size());
1667  dst += cluster_b->size();
1668
1669  AppendDataInPieces(buffer.get(), buffer_size);
1670
1671  GenerateExpectedReads(0, 9);
1672}
1673
1674TEST_P(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1675  struct BufferTimestamps buffer_timestamps[] = {
1676    {0, 0},
1677    {33, 3},
1678    {67, 6},
1679    {100, 9},
1680    {133, 12},
1681    {kSkip, kSkip},
1682  };
1683
1684  // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1685  // ParseWebMFile() call's expected duration, below, once the file is fixed to
1686  // have the correct duration in the init segment. See http://crbug.com/354284.
1687  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1688
1689  ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1690                            base::TimeDelta::FromMilliseconds(2744)));
1691}
1692
1693TEST_P(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1694  struct BufferTimestamps buffer_timestamps[] = {
1695    {0, 0},
1696    {33, 3},
1697    {67, 6},
1698    {100, 9},
1699    {133, 12},
1700    {kSkip, kSkip},
1701  };
1702
1703  ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1704                            kInfiniteDuration()));
1705}
1706
1707TEST_P(ChunkDemuxerTest, WebMFile_AudioOnly) {
1708  struct BufferTimestamps buffer_timestamps[] = {
1709    {kSkip, 0},
1710    {kSkip, 3},
1711    {kSkip, 6},
1712    {kSkip, 9},
1713    {kSkip, 12},
1714    {kSkip, kSkip},
1715  };
1716
1717  // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1718  // ParseWebMFile() call's expected duration, below, once the file is fixed to
1719  // have the correct duration in the init segment. See http://crbug.com/354284.
1720  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1721
1722  ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1723                            base::TimeDelta::FromMilliseconds(2744),
1724                            HAS_AUDIO));
1725}
1726
1727TEST_P(ChunkDemuxerTest, WebMFile_VideoOnly) {
1728  struct BufferTimestamps buffer_timestamps[] = {
1729    {0, kSkip},
1730    {33, kSkip},
1731    {67, kSkip},
1732    {100, kSkip},
1733    {133, kSkip},
1734    {kSkip, kSkip},
1735  };
1736
1737  // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1738  // ParseWebMFile() call's expected duration, below, once the file is fixed to
1739  // have the correct duration in the init segment. See http://crbug.com/354284.
1740  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
1741
1742  ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
1743                            base::TimeDelta::FromMilliseconds(2703),
1744                            HAS_VIDEO));
1745}
1746
1747TEST_P(ChunkDemuxerTest, WebMFile_AltRefFrames) {
1748  struct BufferTimestamps buffer_timestamps[] = {
1749    {0, 0},
1750    {33, 3},
1751    {33, 6},
1752    {67, 9},
1753    {100, 12},
1754    {kSkip, kSkip},
1755  };
1756
1757  ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
1758                            base::TimeDelta::FromMilliseconds(2767)));
1759}
1760
1761// Verify that we output buffers before the entire cluster has been parsed.
1762TEST_P(ChunkDemuxerTest, IncrementalClusterParsing) {
1763  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1764  AppendEmptyCluster(0);
1765
1766  scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
1767
1768  bool audio_read_done = false;
1769  bool video_read_done = false;
1770  ReadAudio(base::Bind(&OnReadDone,
1771                       base::TimeDelta::FromMilliseconds(0),
1772                       &audio_read_done));
1773  ReadVideo(base::Bind(&OnReadDone,
1774                       base::TimeDelta::FromMilliseconds(0),
1775                       &video_read_done));
1776
1777  // Make sure the reads haven't completed yet.
1778  EXPECT_FALSE(audio_read_done);
1779  EXPECT_FALSE(video_read_done);
1780
1781  // Append data one byte at a time until one or both reads complete.
1782  int i = 0;
1783  for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
1784    AppendData(cluster->data() + i, 1);
1785    message_loop_.RunUntilIdle();
1786  }
1787
1788  EXPECT_TRUE(audio_read_done || video_read_done);
1789  EXPECT_GT(i, 0);
1790  EXPECT_LT(i, cluster->size());
1791
1792  audio_read_done = false;
1793  video_read_done = false;
1794  ReadAudio(base::Bind(&OnReadDone,
1795                       base::TimeDelta::FromMilliseconds(23),
1796                       &audio_read_done));
1797  ReadVideo(base::Bind(&OnReadDone,
1798                       base::TimeDelta::FromMilliseconds(33),
1799                       &video_read_done));
1800
1801  // Make sure the reads haven't completed yet.
1802  EXPECT_FALSE(audio_read_done);
1803  EXPECT_FALSE(video_read_done);
1804
1805  // Append the remaining data.
1806  ASSERT_LT(i, cluster->size());
1807  AppendData(cluster->data() + i, cluster->size() - i);
1808
1809  message_loop_.RunUntilIdle();
1810
1811  EXPECT_TRUE(audio_read_done);
1812  EXPECT_TRUE(video_read_done);
1813}
1814
1815TEST_P(ChunkDemuxerTest, ParseErrorDuringInit) {
1816  EXPECT_CALL(*this, DemuxerOpened());
1817  demuxer_->Initialize(
1818      &host_, CreateInitDoneCB(
1819          kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1820
1821  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1822
1823  uint8 tmp = 0;
1824  demuxer_->AppendData(kSourceId, &tmp, 1,
1825                       append_window_start_for_next_append_,
1826                       append_window_end_for_next_append_,
1827                       &timestamp_offset_map_[kSourceId]);
1828}
1829
1830TEST_P(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
1831  EXPECT_CALL(*this, DemuxerOpened());
1832  demuxer_->Initialize(
1833      &host_, CreateInitDoneCB(kNoTimestamp(),
1834                               DEMUXER_ERROR_COULD_NOT_OPEN), true);
1835
1836  std::vector<std::string> codecs(1);
1837  codecs[0] = "vorbis";
1838  ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs,
1839                            use_legacy_frame_processor_),
1840            ChunkDemuxer::kOk);
1841
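  // The init segment below contains both audio and video tracks, which does
  // not match the audio-only type given to AddId() above, so initialization
  // is expected to fail with DEMUXER_ERROR_COULD_NOT_OPEN.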
1842  AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1843}
1844
1845TEST_P(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
1846  EXPECT_CALL(*this, DemuxerOpened());
1847  demuxer_->Initialize(
1848      &host_, CreateInitDoneCB(kNoTimestamp(),
1849                               DEMUXER_ERROR_COULD_NOT_OPEN), true);
1850
1851  std::vector<std::string> codecs(1);
1852  codecs[0] = "vp8";
1853  ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs,
1854                            use_legacy_frame_processor_),
1855            ChunkDemuxer::kOk);
1856
1857  AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1858}
1859
1860TEST_P(ChunkDemuxerTest, MultipleHeaders) {
1861  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1862
1863  AppendCluster(kDefaultFirstCluster());
1864
1865  // Append another identical initialization segment.
1866  AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1867
1868  AppendCluster(kDefaultSecondCluster());
1869
1870  GenerateExpectedReads(0, 9);
1871}
1872
1873TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
1874  std::string audio_id = "audio1";
1875  std::string video_id = "video1";
1876  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1877
1878  // Append audio and video data into separate source ids.
1879  AppendCluster(audio_id,
1880      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1881  GenerateAudioStreamExpectedReads(0, 4);
1882  AppendCluster(video_id,
1883      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1884  GenerateVideoStreamExpectedReads(0, 4);
1885}
1886
1887TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
1888  // TODO(matthewjheaney): Here and elsewhere, we need more tests
1889// for inband text tracks (http://crbug.com/321455).
1890
1891  std::string audio_id = "audio1";
1892  std::string video_id = "video1";
1893
1894  EXPECT_CALL(host_, AddTextStream(_, _))
1895    .Times(Exactly(2));
1896  ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
1897
1898  // Append audio and video data into separate source ids.
1899  AppendCluster(audio_id,
1900      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1901  GenerateAudioStreamExpectedReads(0, 4);
1902  AppendCluster(video_id,
1903      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1904  GenerateVideoStreamExpectedReads(0, 4);
1905}
1906
1907TEST_P(ChunkDemuxerTest, AddIdFailures) {
1908  EXPECT_CALL(*this, DemuxerOpened());
1909  demuxer_->Initialize(
1910      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1911
1912  std::string audio_id = "audio1";
1913  std::string video_id = "video1";
1914
1915  ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
1916
1917  // Adding an id with audio/video should fail because we already added audio.
1918  ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
1919
1920  AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
1921
1922  // Adding an id after append should fail.
1923  ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
1924}
1925
1926// Test that Read() calls after a RemoveId() return "end of stream" buffers.
1927TEST_P(ChunkDemuxerTest, RemoveId) {
1928  std::string audio_id = "audio1";
1929  std::string video_id = "video1";
1930  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1931
1932  // Append audio and video data into separate source ids.
1933  AppendCluster(audio_id,
1934      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1935  AppendCluster(video_id,
1936      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1937
1938  // Read() from audio should return normal buffers.
1939  GenerateAudioStreamExpectedReads(0, 4);
1940
1941  // Remove the audio id.
1942  demuxer_->RemoveId(audio_id);
1943
1944  // Read() from audio should return "end of stream" buffers.
1945  bool audio_read_done = false;
1946  ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1947  message_loop_.RunUntilIdle();
1948  EXPECT_TRUE(audio_read_done);
1949
1950  // Read() from video should still return normal buffers.
1951  GenerateVideoStreamExpectedReads(0, 4);
1952}
1953
1954// Test that removing an ID immediately after adding it does not interfere with
1955// quota for new IDs in the future.
1956TEST_P(ChunkDemuxerTest, RemoveAndAddId) {
1957  std::string audio_id_1 = "audio1";
1958  ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
1959  demuxer_->RemoveId(audio_id_1);
1960
1961  std::string audio_id_2 = "audio2";
1962  ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
1963}
1964
1965TEST_P(ChunkDemuxerTest, SeekCanceled) {
1966  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1967
1968  // Append cluster at the beginning of the stream.
1969  AppendCluster(GenerateCluster(0, 4));
1970
1971  // Seek to an unbuffered region.
1972  Seek(base::TimeDelta::FromSeconds(50));
1973
1974  // Attempt to read in the unbuffered area; the reads should not complete.
1975  bool audio_read_done = false;
1976  bool video_read_done = false;
1977  ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
1978  ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
1979  EXPECT_FALSE(audio_read_done);
1980  EXPECT_FALSE(video_read_done);
1981
1982  // Now cancel the pending seek, which should flush the reads with empty
1983  // buffers.
1984  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
1985  demuxer_->CancelPendingSeek(seek_time);
1986  message_loop_.RunUntilIdle();
1987  EXPECT_TRUE(audio_read_done);
1988  EXPECT_TRUE(video_read_done);
1989
1990  // A seek back to the buffered region should succeed.
1991  Seek(seek_time);
1992  GenerateExpectedReads(0, 4);
1993}
1994
1995TEST_P(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
1996  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1997
1998  // Append cluster at the beginning of the stream.
1999  AppendCluster(GenerateCluster(0, 4));
2000
2001  // Start waiting for a seek.
2002  base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2003  base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2004  demuxer_->StartWaitingForSeek(seek_time1);
2005
2006  // Now cancel the upcoming seek to an unbuffered region.
2007  demuxer_->CancelPendingSeek(seek_time2);
2008  demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2009
2010  // Read requests should be fulfilled with empty buffers.
2011  bool audio_read_done = false;
2012  bool video_read_done = false;
2013  ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2014  ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2015  EXPECT_TRUE(audio_read_done);
2016  EXPECT_TRUE(video_read_done);
2017
2018  // A seek back to the buffered region should succeed.
2019  Seek(seek_time2);
2020  GenerateExpectedReads(0, 4);
2021}
2022
2023// Test that Seek() successfully seeks to all source IDs.
2024TEST_P(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2025  std::string audio_id = "audio1";
2026  std::string video_id = "video1";
2027  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2028
2029  AppendCluster(
2030      audio_id,
2031      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2032  AppendCluster(
2033      video_id,
2034      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2035
2036  // Read() should return buffers at 0.
2037  bool audio_read_done = false;
2038  bool video_read_done = false;
2039  ReadAudio(base::Bind(&OnReadDone,
2040                       base::TimeDelta::FromMilliseconds(0),
2041                       &audio_read_done));
2042  ReadVideo(base::Bind(&OnReadDone,
2043                       base::TimeDelta::FromMilliseconds(0),
2044                       &video_read_done));
2045  EXPECT_TRUE(audio_read_done);
2046  EXPECT_TRUE(video_read_done);
2047
2048  // Seek to 3 seconds (an unbuffered region).
2049  Seek(base::TimeDelta::FromSeconds(3));
2050
2051  audio_read_done = false;
2052  video_read_done = false;
2053  ReadAudio(base::Bind(&OnReadDone,
2054                       base::TimeDelta::FromSeconds(3),
2055                       &audio_read_done));
2056  ReadVideo(base::Bind(&OnReadDone,
2057                       base::TimeDelta::FromSeconds(3),
2058                       &video_read_done));
2059  // Read()s should not return until after data is appended at the Seek point.
2060  EXPECT_FALSE(audio_read_done);
2061  EXPECT_FALSE(video_read_done);
2062
2063  AppendCluster(audio_id,
2064                GenerateSingleStreamCluster(
2065                    3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2066  AppendCluster(video_id,
2067                GenerateSingleStreamCluster(
2068                    3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2069
2070  message_loop_.RunUntilIdle();
2071
2072  // Read() should return buffers at 3 seconds.
2073  EXPECT_TRUE(audio_read_done);
2074  EXPECT_TRUE(video_read_done);
2075}
2076
2077// Test that Seek() completes successfully when EndOfStream
2078// is called before data is available for that seek point.
2079// This scenario can occur when seeking past the end of stream
2080// of either audio or video (or both).
2081TEST_P(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2082  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2083
2084  AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2085  AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2086
2087  // Seek past the end of the video data.
2088  // Note: audio data is available for that seek point.
2089  bool seek_cb_was_called = false;
2090  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2091  demuxer_->StartWaitingForSeek(seek_time);
2092  demuxer_->Seek(seek_time,
2093                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2094  message_loop_.RunUntilIdle();
2095
2096  EXPECT_FALSE(seek_cb_was_called);
2097
2098  EXPECT_CALL(host_, SetDuration(
2099      base::TimeDelta::FromMilliseconds(120)));
2100  MarkEndOfStream(PIPELINE_OK);
2101  message_loop_.RunUntilIdle();
2102
2103  EXPECT_TRUE(seek_cb_was_called);
2104
2105  ShutdownDemuxer();
2106}
2107
2108// Test that EndOfStream is ignored if it arrives during a pending seek
2109// whose seek time is before some existing ranges.
2110TEST_P(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2111  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2112
2113  AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2114  AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2115  AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2116  AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2117
2118  bool seek_cb_was_called = false;
2119  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2120  demuxer_->StartWaitingForSeek(seek_time);
2121  demuxer_->Seek(seek_time,
2122                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2123  message_loop_.RunUntilIdle();
2124
2125  EXPECT_FALSE(seek_cb_was_called);
2126
2127  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2128  MarkEndOfStream(PIPELINE_OK);
2129  message_loop_.RunUntilIdle();
2130
2131  EXPECT_FALSE(seek_cb_was_called);
2132
2133  demuxer_->UnmarkEndOfStream();
2134
2135  AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2136  AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2137
2138  message_loop_.RunUntilIdle();
2139
2140  EXPECT_TRUE(seek_cb_was_called);
2141
2142  ShutdownDemuxer();
2143}
2144
2145// Test ranges in an audio-only stream.
2146TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2147  EXPECT_CALL(*this, DemuxerOpened());
2148  demuxer_->Initialize(
2149      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2150
2151  ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2152  AppendInitSegment(HAS_AUDIO);
2153
2154  // Test a simple cluster.
2155  AppendCluster(
2156      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2157
2158  CheckExpectedRanges("{ [0,92) }");
2159
2160  // Append a disjoint cluster to check for two separate ranges.
2161  AppendCluster(GenerateSingleStreamCluster(
2162      150, 219, kAudioTrackNum, kAudioBlockDuration));
2163
2164  CheckExpectedRanges("{ [0,92) [150,219) }");
2165}
2166
2167// Test ranges in a video-only stream.
2168TEST_P(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2169  EXPECT_CALL(*this, DemuxerOpened());
2170  demuxer_->Initialize(
2171      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2172
2173  ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2174  AppendInitSegment(HAS_VIDEO);
2175
2176  // Test a simple cluster.
2177  AppendCluster(
2178      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2179
2180  CheckExpectedRanges("{ [0,132) }");
2181
2182  // Append a disjoint cluster to check for two separate ranges.
2183  AppendCluster(GenerateSingleStreamCluster(
2184      200, 299, kVideoTrackNum, kVideoBlockDuration));
2185
2186  CheckExpectedRanges("{ [0,132) [200,299) }");
2187}
2188
2189TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2190  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2191
2192  // Audio: 0 -> 23
2193  // Video: 0 -> 33
2194  // Buffered Range: 0 -> 23
2195  // Audio block duration is smaller than video block duration,
2196  // so the buffered ranges should correspond to the audio blocks.
2197  AppendCluster(GenerateSingleStreamCluster(
2198      0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2199  AppendCluster(GenerateSingleStreamCluster(
2200      0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2201
2202  CheckExpectedRanges("{ [0,23) }");
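  // (For a source with both audio and video, the reported range is the
  // intersection of the per-stream ranges, so the shorter audio range bounds
  // the result here.)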
2203
2204  // Audio: 300 -> 400
2205  // Video: 320 -> 420
2206  // Buffered Range: 320 -> 400  (end overlap)
2207  AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2208  AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2209
2210  CheckExpectedRanges("{ [0,23) [320,400) }");
2211
2212  // Audio: 520 -> 590
2213  // Video: 500 -> 570
2214  // Buffered Range: 520 -> 570  (front overlap)
2215  AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2216  AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2217
2218  CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2219
2220  // Audio: 720 -> 750
2221  // Video: 700 -> 770
2222  // Buffered Range: 720 -> 750  (complete overlap, audio)
2223  AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2224  AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2225
2226  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2227
2228  // Audio: 900 -> 970
2229  // Video: 920 -> 950
2230  // Buffered Range: 920 -> 950  (complete overlap, video)
2231  AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2232  AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2233
2234  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2235
2236  // Appending within buffered range should not affect buffered ranges.
2237  AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2238  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2239
2240  // Appending to single stream outside buffered ranges should not affect
2241  // buffered ranges.
2242  AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2243  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2244}
2245
2246TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2247  EXPECT_CALL(host_, AddTextStream(_, _));
2248  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2249
2250  // Append audio & video data
2251  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
2252  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2253
2254  // Verify that a text track with no cues does not result in an empty buffered
2255  // range.
2256  CheckExpectedRanges("{ [0,46) }");
2257
2258  // Add some text cues.
2259  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
2260
2261  // Verify that the new cues did not affect the buffered ranges.
2262  CheckExpectedRanges("{ [0,46) }");
2263
2264  // Remove the buffered range.
2265  demuxer_->Remove(kSourceId, base::TimeDelta(),
2266                   base::TimeDelta::FromMilliseconds(46));
2267  CheckExpectedRanges("{ }");
2268}
2269
2270// Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2271// over-hanging tails at the end of the ranges, as these are likely due to
2272// block duration differences.
2273TEST_P(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2274  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2275
2276  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
2277  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2278
2279  CheckExpectedRanges("{ [0,46) }");
2280
2281  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2282  MarkEndOfStream(PIPELINE_OK);
2283
2284  // Verify that the range extends to the end of the video data.
2285  CheckExpectedRanges("{ [0,66) }");
2286
2287  // Verify that the range reverts to the intersection when end of stream
2288  // has been cancelled.
2289  demuxer_->UnmarkEndOfStream();
2290  CheckExpectedRanges("{ [0,46) }");
2291
2292  // Append and remove data so that the 2 streams' end ranges do not overlap.
2293
2294  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
2295  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2296  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2297  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2298                            "200K 233 266 299 332K 365");
2299
2300  // At this point, the per-stream ranges are as follows:
2301  // Audio: [0,46) [200,246)
2302  // Video: [0,66) [200,398)
2303  CheckExpectedRanges("{ [0,46) [200,246) }");
2304
2305  demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2306                   base::TimeDelta::FromMilliseconds(300));
2307
2308  // At this point, the per-stream ranges are as follows:
2309  // Audio: [0,46)
2310  // Video: [0,66) [332,398)
2311  CheckExpectedRanges("{ [0,46) }");
2312
2313  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2314  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
2315
2316  // At this point, the per-stream ranges are as follows:
2317  // Audio: [0,46) [200,246)
2318  // Video: [0,66) [200,266) [332,398)
2319  // NOTE: The last ranges of the two streams do not overlap in time.
2320  CheckExpectedRanges("{ [0,46) [200,246) }");
2321
2322  MarkEndOfStream(PIPELINE_OK);
2323
2324  // NOTE: The last range on each stream gets extended to the highest
2325  // end timestamp according to the spec. The last audio range gets extended
2326  // from [200,246) to [200,398) which is why the intersection results in the
2327  // middle range getting larger AND the new range appearing.
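  // Worked out from the per-stream ranges listed above:
  //   Audio (last range extended to 398): [0,46) [200,398)
  //   Video (unchanged):                  [0,66) [200,266) [332,398)
  //   Intersection:                       [0,46) [200,266) [332,398)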
2328  CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2329}
2330
2331TEST_P(ChunkDemuxerTest, DifferentStreamTimecodes) {
2332  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2333
2334  // Create a cluster where the video timecode begins 25ms after the audio.
2335  AppendCluster(GenerateCluster(0, 25, 8));
2336
2337  Seek(base::TimeDelta::FromSeconds(0));
2338  GenerateExpectedReads(0, 25, 8);
2339
2340  // Seek to 5 seconds.
2341  Seek(base::TimeDelta::FromSeconds(5));
2342
2343  // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2344  // after the video.
2345  AppendCluster(GenerateCluster(5025, 5000, 8));
2346  GenerateExpectedReads(5025, 5000, 8);
2347}
2348
2349TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2350  std::string audio_id = "audio1";
2351  std::string video_id = "video1";
2352  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2353
2354  // Generate two streams where the video stream starts 5ms after the audio
2355  // stream and append them.
2356  AppendCluster(audio_id, GenerateSingleStreamCluster(
2357      25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2358  AppendCluster(video_id, GenerateSingleStreamCluster(
2359      30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2360
2361  // Both streams should be able to fulfill a seek to 25.
2362  Seek(base::TimeDelta::FromMilliseconds(25));
2363  GenerateAudioStreamExpectedReads(25, 4);
2364  GenerateVideoStreamExpectedReads(30, 4);
2365}
2366
2367TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2368  std::string audio_id = "audio1";
2369  std::string video_id = "video1";
2370  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2371
2372  // Generate two streams where the video stream starts 10s after the audio
2373  // stream and append them.
2374  AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2375      4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2376  AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2377      4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2378
2379  // Should not be able to fulfill a seek to 0.
2380  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2381  demuxer_->StartWaitingForSeek(seek_time);
2382  demuxer_->Seek(seek_time,
2383                 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2384  ExpectRead(DemuxerStream::AUDIO, 0);
2385  ExpectEndOfStream(DemuxerStream::VIDEO);
2386}
2387
2388TEST_P(ChunkDemuxerTest, ClusterWithNoBuffers) {
2389  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2390
2391  // Generate and append an empty cluster beginning at 0.
2392  AppendEmptyCluster(0);
2393
2394  // Sanity check that data can be appended after this cluster correctly.
2395  AppendCluster(GenerateCluster(0, 2));
2396  ExpectRead(DemuxerStream::AUDIO, 0);
2397  ExpectRead(DemuxerStream::VIDEO, 0);
2398}
2399
2400TEST_P(ChunkDemuxerTest, CodecPrefixMatching) {
2401  ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2402
2403#if defined(USE_PROPRIETARY_CODECS)
2404  expected = ChunkDemuxer::kOk;
2405#endif
2406
2407  std::vector<std::string> codecs;
2408  codecs.push_back("avc1.4D4041");
2409
2410  EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs,
2411                            use_legacy_frame_processor_),
2412            expected);
2413}
2414
2415// Test codec IDs that are not compliant with RFC6381, but have been
2416// seen in the wild.
2417TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2418  ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2419
2420#if defined(USE_PROPRIETARY_CODECS)
2421  expected = ChunkDemuxer::kOk;
2422#endif
2423  const char* codec_ids[] = {
2424    // GPAC places leading zeros on the audio object type.
2425    "mp4a.40.02",
2426    "mp4a.40.05"
2427  };
2428
2429  for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2430    std::vector<std::string> codecs;
2431    codecs.push_back(codec_ids[i]);
2432
2433    ChunkDemuxer::Status result =
2434        demuxer_->AddId("source_id", "audio/mp4", codecs,
2435                        use_legacy_frame_processor_);
2436
2437    EXPECT_EQ(result, expected)
2438        << "Failed to add codec_id '" << codec_ids[i] << "'";
2439
2440    if (result == ChunkDemuxer::kOk)
2441      demuxer_->RemoveId("source_id");
2442  }
2443}
2444
2445TEST_P(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2446  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2447
2448  EXPECT_CALL(host_, SetDuration(_))
2449      .Times(AnyNumber());
2450
2451  base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2452  base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2453
2454  AppendCluster(kDefaultFirstCluster());
2455  AppendCluster(kDefaultSecondCluster());
2456  MarkEndOfStream(PIPELINE_OK);
2457
2458  DemuxerStream::Status status;
2459  base::TimeDelta last_timestamp;
2460
2461  // Verify that we can read audio & video to the end w/o problems.
2462  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2463  EXPECT_EQ(DemuxerStream::kOk, status);
2464  EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2465
2466  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2467  EXPECT_EQ(DemuxerStream::kOk, status);
2468  EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2469
2470  // Seek back to 0 and verify that we can read to the end again.
2471  Seek(base::TimeDelta::FromMilliseconds(0));
2472
2473  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2474  EXPECT_EQ(DemuxerStream::kOk, status);
2475  EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2476
2477  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2478  EXPECT_EQ(DemuxerStream::kOk, status);
2479  EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2480}
2481
2482TEST_P(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2483  EXPECT_CALL(*this, DemuxerOpened());
2484  demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2485  ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2486  ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2487
2488  CheckExpectedRanges("audio", "{ }");
2489  CheckExpectedRanges("video", "{ }");
2490}
2491
2492// Test that Seek() completes successfully when the first cluster
2493// arrives.
2494TEST_P(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2495  InSequence s;
2496
2497  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2498
2499  AppendCluster(kDefaultFirstCluster());
2500
2501  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2502  demuxer_->StartWaitingForSeek(seek_time);
2503
2504  AppendCluster(kDefaultSecondCluster());
2505  EXPECT_CALL(host_, SetDuration(
2506      base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2507  MarkEndOfStream(PIPELINE_OK);
2508
2509  demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2510
2511  GenerateExpectedReads(0, 4);
2512  GenerateExpectedReads(46, 66, 5);
2513
2514  EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2515  end_of_stream_helper.RequestReads();
2516  end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2517}
2518
2519TEST_P(ChunkDemuxerTest, ConfigChange_Video) {
2520  InSequence s;
2521
2522  ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2523
2524  DemuxerStream::Status status;
2525  base::TimeDelta last_timestamp;
2526
2527  DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2528
2529  // Fetch initial video config and verify it matches what we expect.
2530  const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2531  ASSERT_TRUE(video_config_1.IsValidConfig());
2532  EXPECT_EQ(video_config_1.natural_size().width(), 320);
2533  EXPECT_EQ(video_config_1.natural_size().height(), 240);
2534
2535  ExpectRead(DemuxerStream::VIDEO, 0);
2536
2537  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2538
2539  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2540  EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2541
2542  // Fetch the new decoder config.
2543  const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2544  ASSERT_TRUE(video_config_2.IsValidConfig());
2545  EXPECT_EQ(video_config_2.natural_size().width(), 640);
2546  EXPECT_EQ(video_config_2.natural_size().height(), 360);
2547
2548  ExpectRead(DemuxerStream::VIDEO, 527);
2549
2550  // Read until the next config change.
2551  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2552  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2553  EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2554
2555  // Get the new config and verify that it matches the first one.
2556  ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2557
2558  ExpectRead(DemuxerStream::VIDEO, 801);
2559
2560  // Read until the end of the stream just to make sure there aren't any other
2561  // config changes.
2562  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2563  ASSERT_EQ(status, DemuxerStream::kOk);
2564}
2565
2566TEST_P(ChunkDemuxerTest, ConfigChange_Audio) {
2567  InSequence s;
2568
2569  ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2570
2571  DemuxerStream::Status status;
2572  base::TimeDelta last_timestamp;
2573
2574  DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2575
2576  // Fetch initial audio config and verify it matches what we expect.
2577  const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2578  ASSERT_TRUE(audio_config_1.IsValidConfig());
2579  EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2580  EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2581
2582  ExpectRead(DemuxerStream::AUDIO, 0);
2583
2584  // The first config change seen is from a splice frame representing an overlap
2585  // of buffers from config 1 by buffers from config 2.
2586  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2587  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2588  EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2589
2590  // Fetch the new decoder config.
2591  const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2592  ASSERT_TRUE(audio_config_2.IsValidConfig());
2593  EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2594  EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2595
2596  // The next config change is from a splice frame representing an overlap of
2597  // buffers from config 2 by buffers from config 1.
2598  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2599  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2600  EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2601  ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2602
2603  // Read until the end of the stream just to make sure there aren't any other
2604  // config changes.
2605  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2606  ASSERT_EQ(status, DemuxerStream::kOk);
2607  EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2608}
2609
2610TEST_P(ChunkDemuxerTest, ConfigChange_Seek) {
2611  InSequence s;
2612
2613  ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2614
2615  DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2616
2617  // Fetch initial video config and verify it matches what we expect.
2618  const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2619  ASSERT_TRUE(video_config_1.IsValidConfig());
2620  EXPECT_EQ(video_config_1.natural_size().width(), 320);
2621  EXPECT_EQ(video_config_1.natural_size().height(), 240);
2622
2623  ExpectRead(DemuxerStream::VIDEO, 0);
2624
2625  // Seek to a location with a different config.
2626  Seek(base::TimeDelta::FromMilliseconds(527));
2627
2628  // Verify that the config change is signalled.
2629  ExpectConfigChanged(DemuxerStream::VIDEO);
2630
2631  // Fetch the new decoder config and verify it is what we expect.
2632  const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2633  ASSERT_TRUE(video_config_2.IsValidConfig());
2634  EXPECT_EQ(video_config_2.natural_size().width(), 640);
2635  EXPECT_EQ(video_config_2.natural_size().height(), 360);
2636
2637  // Verify that Read() will return a buffer now.
2638  ExpectRead(DemuxerStream::VIDEO, 527);
2639
2640  // Seek back to the beginning and verify we get another config change.
2641  Seek(base::TimeDelta::FromMilliseconds(0));
2642  ExpectConfigChanged(DemuxerStream::VIDEO);
2643  ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2644  ExpectRead(DemuxerStream::VIDEO, 0);
2645
2646  // Seek to a location that requires a config change and then
2647  // seek to a new location that has the same configuration as
2648  // the start of the file without a Read() in the middle.
2649  Seek(base::TimeDelta::FromMilliseconds(527));
2650  Seek(base::TimeDelta::FromMilliseconds(801));
2651
2652  // Verify that no config change is signalled.
2653  ExpectRead(DemuxerStream::VIDEO, 801);
2654  ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2655}
2656
2657TEST_P(ChunkDemuxerTest, TimestampPositiveOffset) {
2658  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2659
2660  ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2661  AppendCluster(GenerateCluster(0, 2));
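  // With the +30 s offset, the cluster generated at 0 ms is buffered starting
  // at 30 s, so the seek and reads below target the shifted timestamps.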
2662
2663  Seek(base::TimeDelta::FromMilliseconds(30000));
2664
2665  GenerateExpectedReads(30000, 2);
2666}
2667
2668TEST_P(ChunkDemuxerTest, TimestampNegativeOffset) {
2669  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2670
2671  ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2672  AppendCluster(GenerateCluster(1000, 2));
2673
2674  GenerateExpectedReads(0, 2);
2675}
2676
2677TEST_P(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2678  std::string audio_id = "audio1";
2679  std::string video_id = "video1";
2680  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2681
2682  ASSERT_TRUE(SetTimestampOffset(
2683      audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2684  ASSERT_TRUE(SetTimestampOffset(
2685      video_id, base::TimeDelta::FromMilliseconds(-2500)));
2686  AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2687      2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2688  AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2689      2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2690  GenerateAudioStreamExpectedReads(0, 4);
2691  GenerateVideoStreamExpectedReads(0, 4);
2692
2693  Seek(base::TimeDelta::FromMilliseconds(27300));
2694
2695  ASSERT_TRUE(SetTimestampOffset(
2696      audio_id, base::TimeDelta::FromMilliseconds(27300)));
2697  ASSERT_TRUE(SetTimestampOffset(
2698      video_id, base::TimeDelta::FromMilliseconds(27300)));
2699  AppendCluster(audio_id, GenerateSingleStreamCluster(
2700      0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2701  AppendCluster(video_id, GenerateSingleStreamCluster(
2702      0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2703  GenerateVideoStreamExpectedReads(27300, 4);
2704  GenerateAudioStreamExpectedReads(27300, 4);
2705}
2706
2707TEST_P(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
2708  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2709
2710  scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2711  // Append only part of the cluster data.
2712  AppendData(cluster->data(), cluster->size() - 13);
2713
2714  // Confirm we're in the middle of parsing a media segment.
2715  ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2716
2717  demuxer_->Abort(kSourceId,
2718                  append_window_start_for_next_append_,
2719                  append_window_end_for_next_append_,
2720                  &timestamp_offset_map_[kSourceId]);
2721
2722  // After Abort(), parsing should no longer be in the middle of a media
2723  // segment.
2724  ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
2725}
2726
2727#if defined(USE_PROPRIETARY_CODECS)
2728#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
2729TEST_P(ChunkDemuxerTest, EmitBuffersDuringAbort) {
2730  EXPECT_CALL(*this, DemuxerOpened());
2731  demuxer_->Initialize(
2732      &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
2733  EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
2734
2735  // For info:
2736  // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
2737  // Video: first PES:
2738  //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
2739  //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
2740  // Audio: first PES:
2741  //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
2742  //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
2743  // Video: last PES:
2744  //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
2745  //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
2746  // Audio: last PES:
2747  //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]
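  // Per the timestamps above, the last video PTS (~4.11 s) is later than the
  // last audio PTS (~3.93 s), which is consistent with the buffered range end
  // growing once Abort() flushes the pending video buffer (checked below).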
2748
2749  scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
2750  AppendData(kSourceId, buffer->data(), buffer->data_size());
2751
2752  // Confirm we're in the middle of parsing a media segment.
2753  ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2754
2755  // Aborting the MPEG-2 TS parser triggers the emission of the last video
2756  // buffer, which is pending in the stream parser.
2757  Ranges<base::TimeDelta> range_before_abort =
2758      demuxer_->GetBufferedRanges(kSourceId);
2759  demuxer_->Abort(kSourceId,
2760                  append_window_start_for_next_append_,
2761                  append_window_end_for_next_append_,
2762                  &timestamp_offset_map_[kSourceId]);
2763  Ranges<base::TimeDelta> range_after_abort =
2764      demuxer_->GetBufferedRanges(kSourceId);
2765
2766  ASSERT_EQ(range_before_abort.size(), 1u);
2767  ASSERT_EQ(range_after_abort.size(), 1u);
2768  EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
2769  EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
2770}
2771#endif
2772#endif
2773
2774TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
2775  // TODO(wolenetz): Also test 'unknown' sized clusters.
2776  // See http://crbug.com/335676.
2777  const uint8 kBuffer[] = {
2778    0x1F, 0x43, 0xB6, 0x75, 0x83,  // CLUSTER (size = 3)
2779    0xE7, 0x81, 0x01,                // Cluster TIMECODE (value = 1)
2780  };
2781
2782  // This array gives the expected return value of IsParsingMediaSegment()
2783  // following each incrementally appended byte in |kBuffer|.
2784  const bool kExpectedReturnValues[] = {
2785    false, false, false, false, true,
2786    true, true, false,
2787  };
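  // Reading of the two arrays above: IsParsingMediaSegment() becomes true
  // once the cluster header (4-byte CLUSTER ID + 1-byte size) is complete,
  // stays true while the 3 declared payload bytes (the TIMECODE element) are
  // being appended, and returns to false once the final payload byte lands.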
2788
2789  COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
2790      test_arrays_out_of_sync);
2791  COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
2792
2793  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2794
2795  for (size_t i = 0; i < sizeof(kBuffer); i++) {
2796    DVLOG(3) << "Appending and testing index " << i;
2797    AppendData(kBuffer + i, 1);
2798    bool expected_return_value = kExpectedReturnValues[i];
2799    EXPECT_EQ(expected_return_value,
2800              demuxer_->IsParsingMediaSegment(kSourceId));
2801  }
2802}
2803
2804TEST_P(ChunkDemuxerTest, DurationChange) {
2805  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2806  const int kStreamDuration = kDefaultDuration().InMilliseconds();
2807
2808  // Add data leading up to the currently set duration.
2809  AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
2810                                kStreamDuration - kVideoBlockDuration,
2811                                2));
2812
2813  CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
2814
2815  // Add data beginning at the currently set duration and expect a new duration
2816  // to be signaled. Note that the last video block will have a higher end
2817  // timestamp than the last audio block.
2818  if (use_legacy_frame_processor_) {
2819    const int kNewStreamDurationAudio = kStreamDuration + kAudioBlockDuration;
2820    EXPECT_CALL(host_, SetDuration(
2821      base::TimeDelta::FromMilliseconds(kNewStreamDurationAudio)));
2822  }
2823  const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
2824  EXPECT_CALL(host_, SetDuration(
2825      base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
2826  AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
2827
2828  CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
2829
2830  // Add more data to the end of each media type. Note that the last audio block
2831  // will have a higher end timestamp than the last video block.
2832  const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
2833  EXPECT_CALL(host_, SetDuration(
2834      base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
2835  AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
2836                                kStreamDuration + kVideoBlockDuration,
2837                                3));
2838
2839  // See that the range has increased appropriately (but not to the full
2840  // duration of 201293, since there is not enough video appended for that).
2841  CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
2842}
2843
2844TEST_P(ChunkDemuxerTest, DurationChangeTimestampOffset) {
2845  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2846
2847  ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
2848
2849  if (use_legacy_frame_processor_) {
2850    EXPECT_CALL(host_, SetDuration(
2851        kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2852            kAudioBlockDuration * 2)));
2853  }
2854  EXPECT_CALL(host_, SetDuration(
2855      kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2856          kVideoBlockDuration * 2)));
2857  AppendCluster(GenerateCluster(0, 4));
2858}
2859
2860TEST_P(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
2861  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2862
2863  AppendCluster(kDefaultFirstCluster());
2864
2865  EXPECT_CALL(host_, SetDuration(
2866      base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
2867  MarkEndOfStream(PIPELINE_OK);
2868}
2869
2870
2871TEST_P(ChunkDemuxerTest, ZeroLengthAppend) {
2872  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2873  AppendData(NULL, 0);
2874}
2875
2876TEST_P(ChunkDemuxerTest, AppendAfterEndOfStream) {
2877  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2878
2879  EXPECT_CALL(host_, SetDuration(_))
2880      .Times(AnyNumber());
2881
2882  AppendCluster(kDefaultFirstCluster());
2883  MarkEndOfStream(PIPELINE_OK);
2884
2885  demuxer_->UnmarkEndOfStream();
2886
2887  AppendCluster(kDefaultSecondCluster());
2888  MarkEndOfStream(PIPELINE_OK);
2889}
2890
2891// Test receiving a Shutdown() call before we get an Initialize()
2892// call. This can happen if the video element gets destroyed before
2893// the pipeline has a chance to initialize the demuxer.
2894TEST_P(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
2895  demuxer_->Shutdown();
2896  demuxer_->Initialize(
2897      &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
2898  message_loop_.RunUntilIdle();
2899}
2900
// Verifies that signaling end of stream while stalled at a gap
// boundary does not trigger end of stream buffers to be returned.
TEST_P(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(0, 10);
  AppendCluster(300, 10);
  CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");

  GenerateExpectedReads(0, 10);

  bool audio_read_done = false;
  bool video_read_done = false;
  ReadAudio(base::Bind(&OnReadDone,
                       base::TimeDelta::FromMilliseconds(138),
                       &audio_read_done));
  ReadVideo(base::Bind(&OnReadDone,
                       base::TimeDelta::FromMilliseconds(138),
                       &video_read_done));

  // Verify that the reads didn't complete.
  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);

  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
  MarkEndOfStream(PIPELINE_OK);

  // Verify that the reads still haven't completed.
  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);

  demuxer_->UnmarkEndOfStream();

  AppendCluster(138, 22);

  message_loop_.RunUntilIdle();

  CheckExpectedRanges(kSourceId, "{ [0,435) }");

  // Verify that the reads have completed.
  EXPECT_TRUE(audio_read_done);
  EXPECT_TRUE(video_read_done);

  // Read the rest of the buffers.
  GenerateExpectedReads(161, 171, 20);

  // Verify that reads block because the append cleared the end of stream state.
  audio_read_done = false;
  video_read_done = false;
  ReadAudio(base::Bind(&OnReadDone_EOSExpected,
                       &audio_read_done));
  ReadVideo(base::Bind(&OnReadDone_EOSExpected,
                       &video_read_done));

  // Verify that the reads don't complete.
  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);

  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
  MarkEndOfStream(PIPELINE_OK);

  EXPECT_TRUE(audio_read_done);
  EXPECT_TRUE(video_read_done);
}

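// Verifies that a pending seek canceled during the initial preroll does not
// prevent a subsequent seek from being satisfied.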
TEST_P(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Cancel preroll.
  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
  demuxer_->CancelPendingSeek(seek_time);

  // Initiate the seek to the new location.
  Seek(seek_time);

  // Append data to satisfy the seek.
  AppendCluster(seek_time.InMilliseconds(), 10);
}

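// Verifies that garbage collection triggered while a seek is pending does not
// remove the buffered range containing the seek target.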
TEST_P(ChunkDemuxerTest, GCDuringSeek) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);

  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);

  // Initiate a seek to |seek_time1|.
  Seek(seek_time1);

  // Append data to satisfy the first seek request.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time1.InMilliseconds(), 5);
  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");

  // Signal that the second seek is starting.
  demuxer_->StartWaitingForSeek(seek_time2);

  // Append data to satisfy the second seek. This append triggers
  // the garbage collection logic since we set the memory limit to
  // 5 blocks.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time2.InMilliseconds(), 5);

  // Verify that the buffers that cover |seek_time2| do not get
  // garbage collected.
  CheckExpectedRanges(kSourceId, "{ [500,615) }");

  // Complete the seek.
  demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));

  // Append more data and make sure that the blocks for |seek_time2|
  // don't get removed.
  //
  // NOTE: The current GC algorithm tries to preserve the GOP at the
  //  current position as well as the last appended GOP. This is
  //  why there are 2 ranges in the expectations.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
  CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
}

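// Verifies that Remove() can be called before an initialization segment has
// been appended.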
TEST_P(ChunkDemuxerTest, RemoveBeforeInitSegment) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);

  EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO | HAS_VIDEO));

  demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(0),
                   base::TimeDelta::FromMilliseconds(1));
}

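// Verifies append window filtering of video buffers.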
TEST_P(ChunkDemuxerTest, AppendWindow_Video) {
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");

  // Verify that GOPs that start outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
  CheckExpectedRanges(kSourceId, "{ [120,270) }");
  CheckExpectedBuffers(stream, "120 150 180 210 240");

  // Extend the append window to [20,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that the added buffers start at the next
  // keyframe.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "360 390 420K 450 480 510 540K 570 600 630K");
  CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}

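// Verifies append window filtering of audio buffers, including trimming of a
// buffer that overlaps the start of the window.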
TEST_P(ChunkDemuxerTest, AppendWindow_Audio) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the append window to [20,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");

  // Verify that frames that end outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
  //
  // The first 20ms of the first buffer should be trimmed off since it
  // overlaps the start of the append window.
  CheckExpectedRanges(kSourceId, "{ [20,270) }");
  CheckExpectedBuffers(stream, "20 30 60 90 120 150 180 210 240");

  // Extend the append window to [20,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
  CheckExpectedRanges(kSourceId, "{ [20,270) [360,630) }");
}

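// Verifies that an audio buffer overlapping both the start and the end of the
// append window is dropped entirely.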
TEST_P(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  // Set the append window to [10,20).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");

  // Verify that everything is dropped in this case.  No partial append should
  // be generated.
  CheckExpectedRanges(kSourceId, "{ }");
}

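// Verifies append window filtering of text cues.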
TEST_P(ChunkDemuxerTest, AppendWindow_Text) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append
  // window.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");

  // Verify that text cues that start outside the window are not included
  // in the buffer. Also verify that cues that extend beyond the
  // window are not included.
  CheckExpectedRanges(kSourceId, "{ [120,270) }");
  CheckExpectedBuffers(video_stream, "120 150 180 210 240");
  CheckExpectedBuffers(text_stream, "100");

  // Extend the append window to [20,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "360 390 420K 450 480 510 540K 570 600 630K");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
  CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");

  // Seek to the new range and verify that the expected buffers are returned.
  Seek(base::TimeDelta::FromMilliseconds(420));
  CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
  CheckExpectedBuffers(text_stream, "400 500");
}

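// Verifies that StartWaitingForSeek() can be called after a parse error has
// put the demuxer into an error state.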
TEST_P(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendGarbage();
  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
  demuxer_->StartWaitingForSeek(seek_time);
}

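// Verifies that Remove() clears buffered audio, video, and text data and that
// newly appended buffers replace the removed ones.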
TEST_P(ChunkDemuxerTest, Remove_AudioVideoText) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");

  CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
  CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
  CheckExpectedBuffers(text_stream, "0 100 200");

  // Remove the buffers that were added.
  demuxer_->Remove(kSourceId, base::TimeDelta(),
                   base::TimeDelta::FromMilliseconds(300));

  // Verify that all the appended data has been removed.
  CheckExpectedRanges(kSourceId, "{ }");

  // Append new buffers that are clearly different than the original
  // ones and verify that only the new buffers are returned.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "1K 21K 41K 61K 81K 101K 121K 141K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "1K 31 61 91 121K 151 181");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");

  Seek(base::TimeDelta());
  CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
  CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
  CheckExpectedBuffers(text_stream, "1 101 201");
}

// Verifies that a Seek() will complete without text cues for
// the seek point and will return cues after the seek position
// when they are eventually appended.
TEST_P(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
  bool seek_cb_was_called = false;
  demuxer_->StartWaitingForSeek(seek_time);
  demuxer_->Seek(seek_time,
                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
  message_loop_.RunUntilIdle();

  EXPECT_FALSE(seek_cb_was_called);

  bool text_read_done = false;
  text_stream->Read(base::Bind(&OnReadDone,
                               base::TimeDelta::FromMilliseconds(125),
                               &text_read_done));

  // Append audio & video data so the seek completes.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210");

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(seek_cb_was_called);
  EXPECT_FALSE(text_read_done);

  // Read some audio & video buffers to further verify seek completion.
  CheckExpectedBuffers(audio_stream, "120 140");
  CheckExpectedBuffers(video_stream, "120 150");

  EXPECT_FALSE(text_read_done);

  // Append text cues that start after the seek point and verify that
  // they are returned by Read() calls.
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(text_read_done);

  // NOTE: we start at 175 here because the buffer at 125 was returned
  // to the pending read initiated above.
  CheckExpectedBuffers(text_stream, "175 225");

  // Verify that audio & video streams continue to return expected values.
  CheckExpectedBuffers(audio_stream, "160 180");
  CheckExpectedBuffers(video_stream, "180 210");
}

// Generate two sets of tests: one using FrameProcessor, and one using
// LegacyFrameProcessor.
INSTANTIATE_TEST_CASE_P(NewFrameProcessor, ChunkDemuxerTest, Values(false));
INSTANTIATE_TEST_CASE_P(LegacyFrameProcessor, ChunkDemuxerTest, Values(true));

}  // namespace media