chunk_demuxer_unittest.cc revision 5d1f7b1de12d16ceb2c938c56701a3e8bfa558f7
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <algorithm>
6
7#include "base/bind.h"
8#include "base/message_loop/message_loop.h"
9#include "base/strings/string_number_conversions.h"
10#include "base/strings/string_split.h"
11#include "base/strings/string_util.h"
12#include "media/base/audio_decoder_config.h"
13#include "media/base/decoder_buffer.h"
14#include "media/base/decrypt_config.h"
15#include "media/base/mock_demuxer_host.h"
16#include "media/base/test_data_util.h"
17#include "media/base/test_helpers.h"
18#include "media/filters/chunk_demuxer.h"
19#include "media/formats/webm/cluster_builder.h"
20#include "media/formats/webm/webm_constants.h"
21#include "media/formats/webm/webm_crypto_helpers.h"
22#include "testing/gtest/include/gtest/gtest.h"
23
24using ::testing::AnyNumber;
25using ::testing::Exactly;
26using ::testing::InSequence;
27using ::testing::NotNull;
28using ::testing::Return;
29using ::testing::SaveArg;
30using ::testing::SetArgumentPointee;
31using ::testing::_;
32
33namespace media {
34
35const uint8 kTracksHeader[] = {
36  0x16, 0x54, 0xAE, 0x6B,  // Tracks ID
37  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // tracks(size = 0)
38};
39
40// WebM Block bytes that represent a VP8 keyframe.
41const uint8 kVP8Keyframe[] = {
42  0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
43};
44
45// WebM Block bytes that represent a VP8 interframe.
46const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
47
48const int kTracksHeaderSize = sizeof(kTracksHeader);
49const int kTracksSizeOffset = 4;
50
51// The size field of the TrackEntry element in the test file
52// "webm_vorbis_track_entry" starts at index 1 and spans 8 bytes.
53const int kAudioTrackSizeOffset = 1;
54const int kAudioTrackSizeWidth = 8;
55const int kAudioTrackEntryHeaderSize =
56    kAudioTrackSizeOffset + kAudioTrackSizeWidth;
57
58// The size field of the TrackEntry element in the test file
59// "webm_vp8_track_entry" starts at index 1 and spans 8 bytes.
60const int kVideoTrackSizeOffset = 1;
61const int kVideoTrackSizeWidth = 8;
62const int kVideoTrackEntryHeaderSize =
63    kVideoTrackSizeOffset + kVideoTrackSizeWidth;
64
65const int kVideoTrackNum = 1;
66const int kAudioTrackNum = 2;
67const int kTextTrackNum = 3;
68
69const int kAudioBlockDuration = 23;
70const int kVideoBlockDuration = 33;
71const int kTextBlockDuration = 100;
72const int kBlockSize = 10;
73
74const char kSourceId[] = "SourceId";
75const char kDefaultFirstClusterRange[] = "{ [0,46) }";
76const int kDefaultFirstClusterEndTimestamp = 66;
77const int kDefaultSecondClusterEndTimestamp = 132;
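// These values follow from the block durations above: kDefaultFirstCluster()
// (GenerateCluster(0, 4)) buffers two audio blocks (2 * 23ms = 46ms) and two
// video blocks (2 * 33ms = 66ms), so the reported buffered range is the
// intersection [0,46) while the cluster's end timestamp is 66; the default
// second cluster ends with a video block at 99ms + 33ms = 132.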
78
79base::TimeDelta kDefaultDuration() {
80  return base::TimeDelta::FromMilliseconds(201224);
81}
82
83// Writes an integer into |buffer| as a vint that spans 8 bytes.
84// The data pointed to by |buffer| should be at least 8 bytes long.
85// |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
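// For example, WriteInt64(buffer, 0x0A) produces 01 00 00 00 00 00 00 0A: the
// leading 0x01 marker byte declares an 8-byte vint and the remaining seven
// bytes hold the value in big-endian order.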
86static void WriteInt64(uint8* buffer, int64 number) {
87  DCHECK(number >= 0 && number < GG_LONGLONG(0x00FFFFFFFFFFFFFF));
88  buffer[0] = 0x01;
89  int64 tmp = number;
90  for (int i = 7; i > 0; i--) {
91    buffer[i] = tmp & 0xff;
92    tmp >>= 8;
93  }
94}
95
96MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
97  return arg.get() && !arg->end_of_stream() &&
98         arg->timestamp().InMilliseconds() == timestamp_in_ms;
99}
100
101MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
102
103static void OnReadDone(const base::TimeDelta& expected_time,
104                       bool* called,
105                       DemuxerStream::Status status,
106                       const scoped_refptr<DecoderBuffer>& buffer) {
107  EXPECT_EQ(status, DemuxerStream::kOk);
108  EXPECT_EQ(expected_time, buffer->timestamp());
109  *called = true;
110}
111
112static void OnReadDone_AbortExpected(
113    bool* called, DemuxerStream::Status status,
114    const scoped_refptr<DecoderBuffer>& buffer) {
115  EXPECT_EQ(status, DemuxerStream::kAborted);
116  EXPECT_EQ(NULL, buffer.get());
117  *called = true;
118}
119
120static void OnReadDone_EOSExpected(bool* called,
121                                   DemuxerStream::Status status,
122                                   const scoped_refptr<DecoderBuffer>& buffer) {
123  EXPECT_EQ(status, DemuxerStream::kOk);
124  EXPECT_TRUE(buffer->end_of_stream());
125  *called = true;
126}
127
128static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
129  EXPECT_EQ(status, PIPELINE_OK);
130  *called = true;
131}
132
133static void LogFunc(const std::string& str) { DVLOG(1) << str; }
134
135class ChunkDemuxerTest : public testing::Test {
136 protected:
137  enum CodecsIndex {
138    AUDIO,
139    VIDEO,
140    MAX_CODECS_INDEX
141  };
142
143  // Default cluster to append first for simple tests.
144  scoped_ptr<Cluster> kDefaultFirstCluster() {
145    return GenerateCluster(0, 4);
146  }
147
148  // Default cluster to append after kDefaultFirstCluster()
149  // has been appended. This cluster starts with blocks that
150  // have timestamps consistent with the end times of the blocks
151  // in kDefaultFirstCluster() so that these two clusters represent
152  // a continuous region.
153  scoped_ptr<Cluster> kDefaultSecondCluster() {
154    return GenerateCluster(46, 66, 5);
155  }
156
157  ChunkDemuxerTest() {
158    CreateNewDemuxer();
159  }
160
161  void CreateNewDemuxer() {
162    base::Closure open_cb =
163        base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
164    Demuxer::NeedKeyCB need_key_cb =
165        base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
166    demuxer_.reset(new ChunkDemuxer(open_cb, need_key_cb,
167                                    base::Bind(&LogFunc)));
168  }
169
170  virtual ~ChunkDemuxerTest() {
171    ShutdownDemuxer();
172  }
173
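  // Builds a WebM initialization segment into |buffer|: the EBML header,
  // followed by the Info element, then a Tracks element whose 8-byte size
  // field is patched to cover the selected TrackEntry data (plus the
  // ContentEncodings bytes appended to a track entry when it is encrypted).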
174  void CreateInitSegment(int stream_flags,
175                         bool is_audio_encrypted, bool is_video_encrypted,
176                         scoped_ptr<uint8[]>* buffer,
177                         int* size) {
178    bool has_audio = (stream_flags & HAS_AUDIO) != 0;
179    bool has_video = (stream_flags & HAS_VIDEO) != 0;
180    bool has_text = (stream_flags & HAS_TEXT) != 0;
181    scoped_refptr<DecoderBuffer> ebml_header;
182    scoped_refptr<DecoderBuffer> info;
183    scoped_refptr<DecoderBuffer> audio_track_entry;
184    scoped_refptr<DecoderBuffer> video_track_entry;
185    scoped_refptr<DecoderBuffer> audio_content_encodings;
186    scoped_refptr<DecoderBuffer> video_content_encodings;
187    scoped_refptr<DecoderBuffer> text_track_entry;
188
189    ebml_header = ReadTestDataFile("webm_ebml_element");
190
191    info = ReadTestDataFile("webm_info_element");
192
193    int tracks_element_size = 0;
194
195    if (has_audio) {
196      audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
197      tracks_element_size += audio_track_entry->data_size();
198      if (is_audio_encrypted) {
199        audio_content_encodings = ReadTestDataFile("webm_content_encodings");
200        tracks_element_size += audio_content_encodings->data_size();
201      }
202    }
203
204    if (has_video) {
205      video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
206      tracks_element_size += video_track_entry->data_size();
207      if (is_video_encrypted) {
208        video_content_encodings = ReadTestDataFile("webm_content_encodings");
209        tracks_element_size += video_content_encodings->data_size();
210      }
211    }
212
213    if (has_text) {
214      // TODO(matthewjheaney): create an abstraction to do
215      // this (http://crbug/321454).
216      // We need it to also handle the creation of multiple text tracks.
217      //
218      // This is the track entry for a text track,
219      // TrackEntry [AE], size=30
220      //   TrackNum [D7], size=1, val=3
221      //   TrackUID [73] [C5], size=1, value=3
222      //   TrackType [83], size=1, val=0x11
223      //   CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
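      // The 2-byte TrackEntry header plus the 30-byte payload account for the
      // 32 bytes checked by the DCHECK below.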
224      const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
225                         "\x83\x81\x11\x86\x92"
226                         "D_WEBVTT/SUBTITLES";
227      const int len = strlen(str);
228      DCHECK_EQ(len, 32);
229      const uint8* const buf = reinterpret_cast<const uint8*>(str);
230      text_track_entry = DecoderBuffer::CopyFrom(buf, len);
231      tracks_element_size += text_track_entry->data_size();
232    }
233
234    *size = ebml_header->data_size() + info->data_size() +
235        kTracksHeaderSize + tracks_element_size;
236
237    buffer->reset(new uint8[*size]);
238
239    uint8* buf = buffer->get();
240    memcpy(buf, ebml_header->data(), ebml_header->data_size());
241    buf += ebml_header->data_size();
242
243    memcpy(buf, info->data(), info->data_size());
244    buf += info->data_size();
245
246    memcpy(buf, kTracksHeader, kTracksHeaderSize);
247    WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
248    buf += kTracksHeaderSize;
249
250    // TODO(xhwang): Simplify this! Probably have test data files that contain
251    // ContentEncodings directly instead of trying to create one at run-time.
252    if (has_audio) {
253      memcpy(buf, audio_track_entry->data(),
254             audio_track_entry->data_size());
255      if (is_audio_encrypted) {
256        memcpy(buf + audio_track_entry->data_size(),
257               audio_content_encodings->data(),
258               audio_content_encodings->data_size());
259        WriteInt64(buf + kAudioTrackSizeOffset,
260                   audio_track_entry->data_size() +
261                   audio_content_encodings->data_size() -
262                   kAudioTrackEntryHeaderSize);
263        buf += audio_content_encodings->data_size();
264      }
265      buf += audio_track_entry->data_size();
266    }
267
268    if (has_video) {
269      memcpy(buf, video_track_entry->data(),
270             video_track_entry->data_size());
271      if (is_video_encrypted) {
272        memcpy(buf + video_track_entry->data_size(),
273               video_content_encodings->data(),
274               video_content_encodings->data_size());
275        WriteInt64(buf + kVideoTrackSizeOffset,
276                   video_track_entry->data_size() +
277                   video_content_encodings->data_size() -
278                   kVideoTrackEntryHeaderSize);
279        buf += video_content_encodings->data_size();
280      }
281      buf += video_track_entry->data_size();
282    }
283
284    if (has_text) {
285      memcpy(buf, text_track_entry->data(),
286             text_track_entry->data_size());
287      buf += text_track_entry->data_size();
288    }
289  }
290
291  ChunkDemuxer::Status AddId() {
292    return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
293  }
294
295  ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
296    bool has_audio = (stream_flags & HAS_AUDIO) != 0;
297    bool has_video = (stream_flags & HAS_VIDEO) != 0;
298    std::vector<std::string> codecs;
299    std::string type;
300
301    if (has_audio) {
302      codecs.push_back("vorbis");
303      type = "audio/webm";
304    }
305
306    if (has_video) {
307      codecs.push_back("vp8");
308      type = "video/webm";
309    }
310
311    if (!has_audio && !has_video) {
312      return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
313    }
314
315    return demuxer_->AddId(source_id, type, codecs);
316  }
317
318  void AppendData(const uint8* data, size_t length) {
319    AppendData(kSourceId, data, length);
320  }
321
322  void AppendCluster(const std::string& source_id,
323                     scoped_ptr<Cluster> cluster) {
324    AppendData(source_id, cluster->data(), cluster->size());
325  }
326
327  void AppendCluster(scoped_ptr<Cluster> cluster) {
328    AppendCluster(kSourceId, cluster.Pass());
329  }
330
331  void AppendCluster(int timecode, int block_count) {
332    AppendCluster(GenerateCluster(timecode, block_count));
333  }
334
335  void AppendSingleStreamCluster(const std::string& source_id, int track_number,
336                                 int timecode, int block_count) {
337    int block_duration = 0;
338    switch (track_number) {
339      case kVideoTrackNum:
340        block_duration = kVideoBlockDuration;
341        break;
342      case kAudioTrackNum:
343        block_duration = kAudioBlockDuration;
344        break;
345      case kTextTrackNum:
346        block_duration = kTextBlockDuration;
347        break;
348    }
349    ASSERT_NE(block_duration, 0);
350    int end_timecode = timecode + block_count * block_duration;
351    AppendCluster(source_id,
352                  GenerateSingleStreamCluster(
353                      timecode, end_timecode, track_number, block_duration));
354  }
355
356  // |cluster_description| - A space delimited string of buffer info that
357  //  is used to construct a cluster. Each buffer info is a timestamp in
358  //  milliseconds, optionally followed by a 'K' to indicate that the buffer
359  //  should be marked as a keyframe. For example "0K 30 60" should construct
360  //  a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
361  //  at 30ms and 60ms.
362  void AppendSingleStreamCluster(const std::string& source_id, int track_number,
363                                 const std::string& cluster_description) {
364    std::vector<std::string> timestamps;
365    base::SplitString(cluster_description, ' ', &timestamps);
366
367    ClusterBuilder cb;
368    std::vector<uint8> data(10);
369    for (size_t i = 0; i < timestamps.size(); ++i) {
370      std::string timestamp_str = timestamps[i];
371      int block_flags = 0;
372      if (EndsWith(timestamp_str, "K", true)) {
373        block_flags = kWebMFlagKeyframe;
374        // Remove the "K" from the token.
375        timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
376      }
377      int timestamp_in_ms;
378      CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
379
380      if (i == 0)
381        cb.SetClusterTimecode(timestamp_in_ms);
382
383      if (track_number == kTextTrackNum) {
384        cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
385                         block_flags, &data[0], data.size());
386      } else {
387        cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
388                          &data[0], data.size());
389      }
390    }
391    AppendCluster(source_id, cb.Finish());
392  }
393
394  void AppendData(const std::string& source_id,
395                  const uint8* data, size_t length) {
396    EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
397    demuxer_->AppendData(source_id, data, length);
398  }
399
400  void AppendDataInPieces(const uint8* data, size_t length) {
401    AppendDataInPieces(data, length, 7);
402  }
403
404  void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
405    const uint8* start = data;
406    const uint8* end = data + length;
407    while (start < end) {
408      size_t append_size = std::min(piece_size,
409                                    static_cast<size_t>(end - start));
410      AppendData(start, append_size);
411      start += append_size;
412    }
413  }
414
415  void AppendInitSegment(int stream_flags) {
416    AppendInitSegmentWithSourceId(kSourceId, stream_flags);
417  }
418
419  void AppendInitSegmentWithSourceId(const std::string& source_id,
420                                     int stream_flags) {
421    AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
422  }
423
424  void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
425                                          int stream_flags,
426                                          bool is_audio_encrypted,
427                                          bool is_video_encrypted) {
428    scoped_ptr<uint8[]> info_tracks;
429    int info_tracks_size = 0;
430    CreateInitSegment(stream_flags,
431                      is_audio_encrypted, is_video_encrypted,
432                      &info_tracks, &info_tracks_size);
433    AppendData(source_id, info_tracks.get(), info_tracks_size);
434  }
435
436  void AppendGarbage() {
437    // Fill up an array with gibberish.
438    int garbage_cluster_size = 10;
439    scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
440    for (int i = 0; i < garbage_cluster_size; ++i)
441      garbage_cluster[i] = i;
442    AppendData(garbage_cluster.get(), garbage_cluster_size);
443  }
444
445  void InitDoneCalled(PipelineStatus expected_status,
446                      PipelineStatus status) {
447    EXPECT_EQ(status, expected_status);
448  }
449
450  void AppendEmptyCluster(int timecode) {
451    AppendCluster(GenerateEmptyCluster(timecode));
452  }
453
454  PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
455                                    PipelineStatus expected_status) {
456    if (expected_duration != kNoTimestamp())
457      EXPECT_CALL(host_, SetDuration(expected_duration));
458    return CreateInitDoneCB(expected_status);
459  }
460
461  PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
462    return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
463                      base::Unretained(this),
464                      expected_status);
465  }
466
467  enum StreamFlags {
468    HAS_AUDIO = 1 << 0,
469    HAS_VIDEO = 1 << 1,
470    HAS_TEXT = 1 << 2
471  };
472
473  bool InitDemuxer(int stream_flags) {
474    return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
475  }
476
477  bool InitDemuxerWithEncryptionInfo(
478      int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
479
480    PipelineStatus expected_status =
481        (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
482
483    base::TimeDelta expected_duration = kNoTimestamp();
484    if (expected_status == PIPELINE_OK)
485      expected_duration = kDefaultDuration();
486
487    EXPECT_CALL(*this, DemuxerOpened());
488    demuxer_->Initialize(
489        &host_, CreateInitDoneCB(expected_duration, expected_status), true);
490
491    if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
492      return false;
493
494    AppendInitSegmentWithEncryptedInfo(
495        kSourceId, stream_flags,
496        is_audio_encrypted, is_video_encrypted);
497    return true;
498  }
499
500  bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
501                                           const std::string& video_id,
502                                           bool has_text) {
503    EXPECT_CALL(*this, DemuxerOpened());
504    demuxer_->Initialize(
505        &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
506
507    if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
508      return false;
509    if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
510      return false;
511
512    int audio_flags = HAS_AUDIO;
513    int video_flags = HAS_VIDEO;
514
515    if (has_text) {
516      audio_flags |= HAS_TEXT;
517      video_flags |= HAS_TEXT;
518    }
519
520    AppendInitSegmentWithSourceId(audio_id, audio_flags);
521    AppendInitSegmentWithSourceId(video_id, video_flags);
522    return true;
523  }
524
525  bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
526                                       const std::string& video_id) {
527    return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
528  }
529
530  // Initializes the demuxer with data from 2 files with different
531  // decoder configurations. This is used to test the decoder config change
532  // logic.
533  //
534  // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
535  // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
536  // The resulting video stream returns data from each file for the following
537  // time ranges.
538  // bear-320x240.webm : [0-501)       [801-2737)
539  // bear-640x360.webm :       [527-793)
540  //
541  // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
542  // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
543  // The resulting audio stream returns data from each file for the following
544  // time ranges.
545  // bear-320x240.webm : [0-524)       [779-2737)
546  // bear-640x360.webm :       [527-759)
547  bool InitDemuxerWithConfigChangeData() {
548    scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
549    scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
550
551    EXPECT_CALL(*this, DemuxerOpened());
552    demuxer_->Initialize(
553        &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
554                                 PIPELINE_OK), true);
555
556    if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
557      return false;
558
559    // Append the whole bear1 file.
560    AppendData(bear1->data(), bear1->data_size());
561    CheckExpectedRanges(kSourceId, "{ [0,2737) }");
562
563    // Append initialization segment for bear2.
564    // Note: Offsets here and below are derived from
565    // media/test/data/bear-640x360-manifest.js and
566    // media/test/data/bear-320x240-manifest.js which were
567    // generated from media/test/data/bear-640x360.webm and
568    // media/test/data/bear-320x240.webm respectively.
569    AppendData(bear2->data(), 4340);
570
571    // Append a media segment that goes from [0.527000, 1.014000).
572    AppendData(bear2->data() + 55290, 18785);
573    CheckExpectedRanges(kSourceId, "{ [0,1028) [1201,2737) }");
574
575    // Append initialization segment for bear1 & fill gap with [779-1197)
576    // segment.
577    AppendData(bear1->data(), 4370);
578    AppendData(bear1->data() + 72737, 28183);
579    CheckExpectedRanges(kSourceId, "{ [0,2737) }");
580
581    MarkEndOfStream(PIPELINE_OK);
582    return true;
583  }
584
585  void ShutdownDemuxer() {
586    if (demuxer_) {
587      demuxer_->Shutdown();
588      message_loop_.RunUntilIdle();
589    }
590  }
591
592  void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
593    uint8 data[] = { 0x00 };
594    cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
595  }
596
597  scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
598    return GenerateCluster(timecode, timecode, block_count);
599  }
600
601  void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
602                          int duration, int flags) {
603    const uint8* data =
604        (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
605    int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
606        sizeof(kVP8Interframe);
607    cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
608  }
609
610  scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
611                                      int first_video_timecode,
612                                      int block_count) {
613    CHECK_GT(block_count, 0);
614
615    int size = 10;
616    scoped_ptr<uint8[]> data(new uint8[size]);
617
618    ClusterBuilder cb;
619    cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
620
621    if (block_count == 1) {
622      cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
623                       kAudioBlockDuration, kWebMFlagKeyframe,
624                       data.get(), size);
625      return cb.Finish();
626    }
627
628    int audio_timecode = first_audio_timecode;
629    int video_timecode = first_video_timecode;
630
631    // Create simple blocks for everything except the last 2 blocks.
632    // The first video frame must be a keyframe.
633    uint8 video_flag = kWebMFlagKeyframe;
634    for (int i = 0; i < block_count - 2; i++) {
635      if (audio_timecode <= video_timecode) {
636        cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
637                          data.get(), size);
638        audio_timecode += kAudioBlockDuration;
639        continue;
640      }
641
642      cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
643                        size);
644      video_timecode += kVideoBlockDuration;
645      video_flag = 0;
646    }
647
648    // Make the last 2 blocks BlockGroups so that they don't get delayed by the
649    // block duration calculation logic.
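    // (A SimpleBlock's duration is inferred from the timestamp of the next
    // block, so a cluster's final blocks could otherwise be held back until
    // more data arrives; a BlockGroup carries an explicit duration and can be
    // emitted immediately.)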
650    if (audio_timecode <= video_timecode) {
651      cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
652                       kWebMFlagKeyframe, data.get(), size);
653      AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
654                         kVideoBlockDuration, video_flag);
655    } else {
656      AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
657                         kVideoBlockDuration, video_flag);
658      cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
659                       kWebMFlagKeyframe, data.get(), size);
660    }
661
662    return cb.Finish();
663  }
664
665  scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
666                                                  int end_timecode,
667                                                  int track_number,
668                                                  int block_duration) {
669    CHECK_GT(end_timecode, timecode);
670
671    std::vector<uint8> data(kBlockSize);
672
673    ClusterBuilder cb;
674    cb.SetClusterTimecode(timecode);
675
676    // Create simple blocks for everything except the last block.
677    for (int i = 0; timecode < (end_timecode - block_duration); i++) {
678      cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
679                        &data[0], data.size());
680      timecode += block_duration;
681    }
682
683    // Make the last block a BlockGroup so that it doesn't get delayed by the
684    // block duration calculation logic.
685    if (track_number == kVideoTrackNum) {
686      AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
687                         kWebMFlagKeyframe);
688    } else {
689      cb.AddBlockGroup(track_number, timecode, block_duration,
690                       kWebMFlagKeyframe, &data[0], data.size());
691    }
692    return cb.Finish();
693  }
694
695  void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
696    demuxer_->GetStream(type)->Read(read_cb);
697    message_loop_.RunUntilIdle();
698  }
699
700  void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
701    Read(DemuxerStream::AUDIO, read_cb);
702  }
703
704  void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
705    Read(DemuxerStream::VIDEO, read_cb);
706  }
707
708  void GenerateExpectedReads(int timecode, int block_count) {
709    GenerateExpectedReads(timecode, timecode, block_count);
710  }
711
712  void GenerateExpectedReads(int start_audio_timecode,
713                             int start_video_timecode,
714                             int block_count) {
715    CHECK_GT(block_count, 0);
716
717    if (block_count == 1) {
718      ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
719      return;
720    }
721
722    int audio_timecode = start_audio_timecode;
723    int video_timecode = start_video_timecode;
724
725    for (int i = 0; i < block_count; i++) {
726      if (audio_timecode <= video_timecode) {
727        ExpectRead(DemuxerStream::AUDIO, audio_timecode);
728        audio_timecode += kAudioBlockDuration;
729        continue;
730      }
731
732      ExpectRead(DemuxerStream::VIDEO, video_timecode);
733      video_timecode += kVideoBlockDuration;
734    }
735  }
736
737  void GenerateSingleStreamExpectedReads(int timecode,
738                                         int block_count,
739                                         DemuxerStream::Type type,
740                                         int block_duration) {
741    CHECK_GT(block_count, 0);
742    int stream_timecode = timecode;
743
744    for (int i = 0; i < block_count; i++) {
745      ExpectRead(type, stream_timecode);
746      stream_timecode += block_duration;
747    }
748  }
749
750  void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
751    GenerateSingleStreamExpectedReads(
752        timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
753  }
754
755  void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
756    GenerateSingleStreamExpectedReads(
757        timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
758  }
759
760  scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
761    ClusterBuilder cb;
762    cb.SetClusterTimecode(timecode);
763    return cb.Finish();
764  }
765
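  // Verifies that the buffered ranges reported for |id| (kSourceId for the
  // single-argument form) render as |expected|, using the same
  // "{ [start,end) ... }" millisecond format as kDefaultFirstClusterRange.
  // For example, ranges 0-46ms and 60-100ms render as "{ [0,46) [60,100) }".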
766  void CheckExpectedRanges(const std::string& expected) {
767    CheckExpectedRanges(kSourceId, expected);
768  }
769
770  void CheckExpectedRanges(const std::string& id,
771                           const std::string& expected) {
772    Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
773
774    std::stringstream ss;
775    ss << "{ ";
776    for (size_t i = 0; i < r.size(); ++i) {
777      ss << "[" << r.start(i).InMilliseconds() << ","
778         << r.end(i).InMilliseconds() << ") ";
779    }
780    ss << "}";
781    EXPECT_EQ(expected, ss.str());
782  }
783
784  MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
785                              const scoped_refptr<DecoderBuffer>&));
786
787  void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
788                            scoped_refptr<DecoderBuffer>* buffer_out,
789                            DemuxerStream::Status status,
790                            const scoped_refptr<DecoderBuffer>& buffer) {
791    *status_out = status;
792    *buffer_out = buffer;
793  }
794
795  void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
796                                   DemuxerStream::Status* status,
797                                   base::TimeDelta* last_timestamp) {
798    DemuxerStream* stream = demuxer_->GetStream(type);
799    scoped_refptr<DecoderBuffer> buffer;
800
801    *last_timestamp = kNoTimestamp();
802    do {
803      stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
804                              base::Unretained(this), status, &buffer));
805      base::MessageLoop::current()->RunUntilIdle();
806      if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
807        *last_timestamp = buffer->timestamp();
808    } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
809  }
810
811  void ExpectEndOfStream(DemuxerStream::Type type) {
812    EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
813    demuxer_->GetStream(type)->Read(base::Bind(
814        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
815    message_loop_.RunUntilIdle();
816  }
817
818  void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
819    EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
820                                HasTimestamp(timestamp_in_ms)));
821    demuxer_->GetStream(type)->Read(base::Bind(
822        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
823    message_loop_.RunUntilIdle();
824  }
825
826  void ExpectConfigChanged(DemuxerStream::Type type) {
827    EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
828    demuxer_->GetStream(type)->Read(base::Bind(
829        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
830    message_loop_.RunUntilIdle();
831  }
832
833  void CheckExpectedBuffers(DemuxerStream* stream,
834                            const std::string& expected) {
835    std::vector<std::string> timestamps;
836    base::SplitString(expected, ' ', &timestamps);
837    std::stringstream ss;
838    for (size_t i = 0; i < timestamps.size(); ++i) {
839      DemuxerStream::Status status;
840      scoped_refptr<DecoderBuffer> buffer;
841      stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
842                              base::Unretained(this), &status, &buffer));
843      base::MessageLoop::current()->RunUntilIdle();
844      if (status != DemuxerStream::kOk || buffer->end_of_stream())
845        break;
846
847      if (i > 0)
848        ss << " ";
849      ss << buffer->timestamp().InMilliseconds();
850    }
851    EXPECT_EQ(expected, ss.str());
852  }
853
854  MOCK_METHOD1(Checkpoint, void(int id));
855
856  struct BufferTimestamps {
857    int video_time_ms;
858    int audio_time_ms;
859  };
860  static const int kSkip = -1;
861
862  // Test parsing a WebM file.
863  // |filename| - The name of the file in media/test/data to parse.
864  // |timestamps| - The expected timestamps on the parsed buffers.
865  //    A timestamp of kSkip indicates that a Read() call for that stream
866  //    shouldn't be made on that iteration of the loop. If both streams have
867  //    a kSkip then the loop will terminate.
868  bool ParseWebMFile(const std::string& filename,
869                     const BufferTimestamps* timestamps,
870                     const base::TimeDelta& duration) {
871    return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
872  }
873
874  bool ParseWebMFile(const std::string& filename,
875                     const BufferTimestamps* timestamps,
876                     const base::TimeDelta& duration,
877                     int stream_flags) {
878    EXPECT_CALL(*this, DemuxerOpened());
879    demuxer_->Initialize(
880        &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
881
882    if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
883      return false;
884
885    // Read a WebM file into memory and send the data to the demuxer.
886    scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
887    AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
888
889    // Verify that the timestamps on the first few packets match what we
890    // expect.
891    for (size_t i = 0;
892         (timestamps[i].audio_time_ms != kSkip ||
893          timestamps[i].video_time_ms != kSkip);
894         i++) {
895      bool audio_read_done = false;
896      bool video_read_done = false;
897
898      if (timestamps[i].audio_time_ms != kSkip) {
899        ReadAudio(base::Bind(&OnReadDone,
900                             base::TimeDelta::FromMilliseconds(
901                                 timestamps[i].audio_time_ms),
902                             &audio_read_done));
903        EXPECT_TRUE(audio_read_done);
904      }
905
906      if (timestamps[i].video_time_ms != kSkip) {
907        ReadVideo(base::Bind(&OnReadDone,
908                             base::TimeDelta::FromMilliseconds(
909                                 timestamps[i].video_time_ms),
910                             &video_read_done));
911        EXPECT_TRUE(video_read_done);
912      }
913    }
914
915    return true;
916  }
917
918  MOCK_METHOD0(DemuxerOpened, void());
919  // TODO(xhwang): This is a workaround of the issue that move-only parameters
920  // are not supported in mocked methods. Remove this when the issue is fixed
921  // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
922  // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
923  MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
924                                 const uint8* init_data, int init_data_size));
925  void DemuxerNeedKey(const std::string& type,
926                      const std::vector<uint8>& init_data) {
927    const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
928    NeedKeyMock(type, init_data_ptr, init_data.size());
929  }
930
931  void Seek(base::TimeDelta seek_time) {
932    demuxer_->StartWaitingForSeek(seek_time);
933    demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
934    message_loop_.RunUntilIdle();
935  }
936
937  void MarkEndOfStream(PipelineStatus status) {
938    demuxer_->MarkEndOfStream(status);
939    message_loop_.RunUntilIdle();
940  }
941
942  base::MessageLoop message_loop_;
943  MockDemuxerHost host_;
944
945  scoped_ptr<ChunkDemuxer> demuxer_;
946
947 private:
948  DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
949};
950
951TEST_F(ChunkDemuxerTest, Init) {
952  // Test no streams, audio-only, video-only, and audio & video scenarios.
953  // Audio and video streams can be encrypted or not encrypted.
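  // Each value of |i| encodes one combination: bit 0 selects audio, bit 1
  // selects video, bit 2 marks the audio as encrypted and bit 3 marks the
  // video as encrypted.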
954  for (int i = 0; i < 16; i++) {
955    bool has_audio = (i & 0x1) != 0;
956    bool has_video = (i & 0x2) != 0;
957    bool is_audio_encrypted = (i & 0x4) != 0;
958    bool is_video_encrypted = (i & 0x8) != 0;
959
960    // Skip invalid combinations.
961    if ((!has_audio && is_audio_encrypted) ||
962        (!has_video && is_video_encrypted)) {
963      continue;
964    }
965
966    CreateNewDemuxer();
967
968    if (is_audio_encrypted || is_video_encrypted) {
969      int need_key_count = (is_audio_encrypted ? 1 : 0) +
970                           (is_video_encrypted ? 1 : 0);
971      EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
972                                     DecryptConfig::kDecryptionKeySize))
973          .Times(Exactly(need_key_count));
974    }
975
976    int stream_flags = 0;
977    if (has_audio)
978      stream_flags |= HAS_AUDIO;
979
980    if (has_video)
981      stream_flags |= HAS_VIDEO;
982
983    ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
984        stream_flags, is_audio_encrypted, is_video_encrypted));
985
986    DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
987    if (has_audio) {
988      ASSERT_TRUE(audio_stream);
989
990      const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
991      EXPECT_EQ(kCodecVorbis, config.codec());
992      EXPECT_EQ(32, config.bits_per_channel());
993      EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
994      EXPECT_EQ(44100, config.samples_per_second());
995      EXPECT_TRUE(config.extra_data());
996      EXPECT_GT(config.extra_data_size(), 0u);
997      EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
998      EXPECT_EQ(is_audio_encrypted,
999                audio_stream->audio_decoder_config().is_encrypted());
1000    } else {
1001      EXPECT_FALSE(audio_stream);
1002    }
1003
1004    DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1005    if (has_video) {
1006      EXPECT_TRUE(video_stream);
1007      EXPECT_EQ(is_video_encrypted,
1008                video_stream->video_decoder_config().is_encrypted());
1009    } else {
1010      EXPECT_FALSE(video_stream);
1011    }
1012
1013    ShutdownDemuxer();
1014    demuxer_.reset();
1015  }
1016}
1017
1018// TODO(acolwell): Fold this test into Init tests since the tests are
1019// almost identical.
1020TEST_F(ChunkDemuxerTest, InitText) {
1021  // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1022  // No encryption cases handled here.
1023  bool has_video = true;
1024  bool is_audio_encrypted = false;
1025  bool is_video_encrypted = false;
1026  for (int i = 0; i < 2; i++) {
1027    bool has_audio = (i & 0x1) != 0;
1028
1029    CreateNewDemuxer();
1030
1031    DemuxerStream* text_stream = NULL;
1032    TextTrackConfig text_config;
1033    EXPECT_CALL(host_, AddTextStream(_, _))
1034        .WillOnce(DoAll(SaveArg<0>(&text_stream),
1035                        SaveArg<1>(&text_config)));
1036
1037    int stream_flags = HAS_TEXT;
1038    if (has_audio)
1039      stream_flags |= HAS_AUDIO;
1040
1041    if (has_video)
1042      stream_flags |= HAS_VIDEO;
1043
1044    ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1045        stream_flags, is_audio_encrypted, is_video_encrypted));
1046    ASSERT_TRUE(text_stream);
1047    EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1048    EXPECT_EQ(kTextSubtitles, text_config.kind());
1049
1050    DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1051    if (has_audio) {
1052      ASSERT_TRUE(audio_stream);
1053
1054      const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1055      EXPECT_EQ(kCodecVorbis, config.codec());
1056      EXPECT_EQ(32, config.bits_per_channel());
1057      EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1058      EXPECT_EQ(44100, config.samples_per_second());
1059      EXPECT_TRUE(config.extra_data());
1060      EXPECT_GT(config.extra_data_size(), 0u);
1061      EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1062      EXPECT_EQ(is_audio_encrypted,
1063                audio_stream->audio_decoder_config().is_encrypted());
1064    } else {
1065      EXPECT_FALSE(audio_stream);
1066    }
1067
1068    DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1069    if (has_video) {
1070      EXPECT_TRUE(video_stream);
1071      EXPECT_EQ(is_video_encrypted,
1072                video_stream->video_decoder_config().is_encrypted());
1073    } else {
1074      EXPECT_FALSE(video_stream);
1075    }
1076
1077    ShutdownDemuxer();
1078    demuxer_.reset();
1079  }
1080}
1081
1082// Make sure that the demuxer reports an error if Shutdown()
1083// is called before all the initialization segments are appended.
1084TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1085  EXPECT_CALL(*this, DemuxerOpened());
1086  demuxer_->Initialize(
1087      &host_, CreateInitDoneCB(
1088          kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1089
1090  EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1091  EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1092
1093  AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1094
1095  ShutdownDemuxer();
1096}
1097
1098TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1099  EXPECT_CALL(*this, DemuxerOpened());
1100  demuxer_->Initialize(
1101      &host_, CreateInitDoneCB(
1102          kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1103
1104  EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1105  EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1106
1107  EXPECT_CALL(host_, AddTextStream(_, _))
1108      .Times(Exactly(1));
1109
1110  AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1111
1112  ShutdownDemuxer();
1113}
1114
1115// Verifies that all streams waiting for data receive an end of stream
1116// buffer when Shutdown() is called.
1117TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1118  DemuxerStream* text_stream = NULL;
1119  EXPECT_CALL(host_, AddTextStream(_, _))
1120      .WillOnce(SaveArg<0>(&text_stream));
1121  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1122
1123  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1124  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1125
1126  bool audio_read_done = false;
1127  bool video_read_done = false;
1128  bool text_read_done = false;
1129  audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1130  video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1131  text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1132  message_loop_.RunUntilIdle();
1133
1134  EXPECT_FALSE(audio_read_done);
1135  EXPECT_FALSE(video_read_done);
1136  EXPECT_FALSE(text_read_done);
1137
1138  ShutdownDemuxer();
1139
1140  EXPECT_TRUE(audio_read_done);
1141  EXPECT_TRUE(video_read_done);
1142  EXPECT_TRUE(text_read_done);
1143}
1144
1145// Test that Seek() completes successfully when the first cluster
1146// arrives.
1147TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1148  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1149  AppendCluster(kDefaultFirstCluster());
1150
1151  InSequence s;
1152
1153  EXPECT_CALL(*this, Checkpoint(1));
1154
1155  Seek(base::TimeDelta::FromMilliseconds(46));
1156
1157  EXPECT_CALL(*this, Checkpoint(2));
1158
1159  Checkpoint(1);
1160
1161  AppendCluster(kDefaultSecondCluster());
1162
1163  message_loop_.RunUntilIdle();
1164
1165  Checkpoint(2);
1166}
1167
1168// Test that parsing errors are handled for clusters appended after init.
1169TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1170  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1171  AppendCluster(kDefaultFirstCluster());
1172
1173  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1174  AppendGarbage();
1175}
1176
1177// Test the case where a Seek() is requested while the parser
1178// is in the middle of a cluster. This verifies that the parser
1179// does not reset itself on a seek.
1180TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1181  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1182
1183  InSequence s;
1184
1185  scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1186
1187  // Split the cluster into two appends at an arbitrary point near the end.
1188  int first_append_size = cluster_a->size() - 11;
1189  int second_append_size = cluster_a->size() - first_append_size;
1190
1191  // Append the first part of the cluster.
1192  AppendData(cluster_a->data(), first_append_size);
1193
1194  ExpectRead(DemuxerStream::AUDIO, 0);
1195  ExpectRead(DemuxerStream::VIDEO, 0);
1196  ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1197  // Note: We skip trying to read a video buffer here because computing
1198  // the duration for this block relies on successfully parsing the last block
1199  // in the cluster.
1200  ExpectRead(DemuxerStream::AUDIO, 2 * kAudioBlockDuration);
1201
1202  Seek(base::TimeDelta::FromSeconds(5));
1203
1204  // Append the rest of the cluster.
1205  AppendData(cluster_a->data() + first_append_size, second_append_size);
1206
1207  // Append the new cluster and verify that only the blocks
1208  // in the new cluster are returned.
1209  AppendCluster(GenerateCluster(5000, 6));
1210  GenerateExpectedReads(5000, 6);
1211}
1212
1213// Test the case where AppendData() is called before Init().
1214TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1215  scoped_ptr<uint8[]> info_tracks;
1216  int info_tracks_size = 0;
1217  CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1218                    false, false, &info_tracks, &info_tracks_size);
1219
1220  demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size);
1221}
1222
1223// Make sure Read() callbacks are dispatched with the proper data.
1224TEST_F(ChunkDemuxerTest, Read) {
1225  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1226
1227  AppendCluster(kDefaultFirstCluster());
1228
1229  bool audio_read_done = false;
1230  bool video_read_done = false;
1231  ReadAudio(base::Bind(&OnReadDone,
1232                       base::TimeDelta::FromMilliseconds(0),
1233                       &audio_read_done));
1234  ReadVideo(base::Bind(&OnReadDone,
1235                       base::TimeDelta::FromMilliseconds(0),
1236                       &video_read_done));
1237
1238  EXPECT_TRUE(audio_read_done);
1239  EXPECT_TRUE(video_read_done);
1240}
1241
1242TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1243  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1244  AppendCluster(kDefaultFirstCluster());
1245  AppendCluster(GenerateCluster(10, 4));
1246
1247  // Make sure that AppendCluster() does not fail with a cluster that
1248  // overlaps the previously appended cluster.
1249  AppendCluster(GenerateCluster(5, 4));
1250
1251  // Verify that AppendData() can still accept more data.
1252  scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1253  demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size());
1254}
1255
1256TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1257  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1258  AppendCluster(kDefaultFirstCluster());
1259
1260  ClusterBuilder cb;
1261
1262  // Test the case where block timecodes are not monotonically
1263  // increasing but stay above the cluster timecode.
1264  cb.SetClusterTimecode(5);
1265  AddSimpleBlock(&cb, kAudioTrackNum, 5);
1266  AddSimpleBlock(&cb, kVideoTrackNum, 10);
1267  AddSimpleBlock(&cb, kAudioTrackNum, 7);
1268  AddSimpleBlock(&cb, kVideoTrackNum, 15);
1269
1270  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1271  AppendCluster(cb.Finish());
1272
1273  // Verify that AppendData() ignores data after the error.
1274  scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1275  demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
1276}
1277
1278TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1279  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1280  AppendCluster(kDefaultFirstCluster());
1281
1282  ClusterBuilder cb;
1283
1284  // Test timecodes going backwards and including values less than the cluster
1285  // timecode.
1286  cb.SetClusterTimecode(5);
1287  AddSimpleBlock(&cb, kAudioTrackNum, 5);
1288  AddSimpleBlock(&cb, kVideoTrackNum, 5);
1289  AddSimpleBlock(&cb, kAudioTrackNum, 3);
1290  AddSimpleBlock(&cb, kVideoTrackNum, 3);
1291
1292  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1293  AppendCluster(cb.Finish());
1294
1295  // Verify that AppendData() ignores data after the error.
1296  scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1297  demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
1298}
1299
1300
1301TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1302  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1303  AppendCluster(kDefaultFirstCluster());
1304
1305  ClusterBuilder cb;
1306
1307  // Test that timestamps are monotonically increasing on a per-stream
1308  // basis.
1309  cb.SetClusterTimecode(5);
1310  AddSimpleBlock(&cb, kAudioTrackNum, 5);
1311  AddSimpleBlock(&cb, kVideoTrackNum, 5);
1312  AddSimpleBlock(&cb, kAudioTrackNum, 4);
1313  AddSimpleBlock(&cb, kVideoTrackNum, 7);
1314
1315  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1316  AppendCluster(cb.Finish());
1317}
1318
1319// Test the case where a cluster is passed to AppendCluster() before
1320// INFO & TRACKS data.
1321TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1322  EXPECT_CALL(*this, DemuxerOpened());
1323  demuxer_->Initialize(
1324      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1325
1326  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1327
1328  AppendCluster(GenerateCluster(0, 1));
1329}
1330
1331// Test cases where we get a MarkEndOfStream() call during initialization.
1332TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1333  EXPECT_CALL(*this, DemuxerOpened());
1334  demuxer_->Initialize(
1335      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1336  MarkEndOfStream(PIPELINE_OK);
1337}
1338
1339TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1340  EXPECT_CALL(*this, DemuxerOpened());
1341  demuxer_->Initialize(
1342      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1343
1344  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1345
1346  CheckExpectedRanges("{ }");
1347  MarkEndOfStream(PIPELINE_OK);
1348  ShutdownDemuxer();
1349  CheckExpectedRanges("{ }");
1350  demuxer_->RemoveId(kSourceId);
1351  demuxer_.reset();
1352}
1353
1354TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1355  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1356
1357  CheckExpectedRanges("{ }");
1358  MarkEndOfStream(PIPELINE_OK);
1359  CheckExpectedRanges("{ }");
1360}
1361
1362TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1363  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1364
1365  AppendCluster(kDefaultFirstCluster());
1366  CheckExpectedRanges(kDefaultFirstClusterRange);
1367
1368  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1369  MarkEndOfStream(PIPELINE_ERROR_DECODE);
1370  CheckExpectedRanges(kDefaultFirstClusterRange);
1371}
1372
1373TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1374  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1375
1376  AppendCluster(kDefaultFirstCluster());
1377  CheckExpectedRanges(kDefaultFirstClusterRange);
1378
1379  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1380  MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1381}
1382
1383// Helper class to reduce duplicate code when testing end of stream
1384// Read() behavior.
1385class EndOfStreamHelper {
1386 public:
1387  explicit EndOfStreamHelper(Demuxer* demuxer)
1388      : demuxer_(demuxer),
1389        audio_read_done_(false),
1390        video_read_done_(false) {
1391  }
1392
1393  // Request a read on the audio and video streams.
1394  void RequestReads() {
1395    EXPECT_FALSE(audio_read_done_);
1396    EXPECT_FALSE(video_read_done_);
1397
1398    DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1399    DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1400
1401    audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1402    video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1403    base::MessageLoop::current()->RunUntilIdle();
1404  }
1405
1406  // Check to see if the |audio_read_done_| and |video_read_done_| variables
1407  // match |expected|.
1408  void CheckIfReadDonesWereCalled(bool expected) {
1409    base::MessageLoop::current()->RunUntilIdle();
1410    EXPECT_EQ(expected, audio_read_done_);
1411    EXPECT_EQ(expected, video_read_done_);
1412  }
1413
1414 private:
1415  static void OnEndOfStreamReadDone(
1416      bool* called,
1417      DemuxerStream::Status status,
1418      const scoped_refptr<DecoderBuffer>& buffer) {
1419    EXPECT_EQ(status, DemuxerStream::kOk);
1420    EXPECT_TRUE(buffer->end_of_stream());
1421    *called = true;
1422  }
1423
1424  Demuxer* demuxer_;
1425  bool audio_read_done_;
1426  bool video_read_done_;
1427
1428  DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1429};
1430
1431// Make sure that all pending reads that we don't have media data for get an
1432// "end of stream" buffer when MarkEndOfStream() is called.
1433TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1434  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1435
1436  AppendCluster(GenerateCluster(0, 2));
1437
1438  bool audio_read_done_1 = false;
1439  bool video_read_done_1 = false;
1440  EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1441  EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1442
1443  ReadAudio(base::Bind(&OnReadDone,
1444                       base::TimeDelta::FromMilliseconds(0),
1445                       &audio_read_done_1));
1446  ReadVideo(base::Bind(&OnReadDone,
1447                       base::TimeDelta::FromMilliseconds(0),
1448                       &video_read_done_1));
1449  message_loop_.RunUntilIdle();
1450
1451  EXPECT_TRUE(audio_read_done_1);
1452  EXPECT_TRUE(video_read_done_1);
1453
1454  end_of_stream_helper_1.RequestReads();
1455
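  // Marking end of stream is expected to extend the reported duration to the
  // end of the buffered data; the single video block appended above ends at
  // 33ms (kVideoBlockDuration).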
1456  EXPECT_CALL(host_, SetDuration(
1457      base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1458  MarkEndOfStream(PIPELINE_OK);
1459
1460  end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1461
1462  end_of_stream_helper_2.RequestReads();
1463  end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1464}
1465
1466// Make sure that all Read() calls after we get a MarkEndOfStream()
1467// call return an "end of stream" buffer.
1468TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1469  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1470
1471  AppendCluster(GenerateCluster(0, 2));
1472
1473  bool audio_read_done_1 = false;
1474  bool video_read_done_1 = false;
1475  EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1476  EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1477  EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1478
1479  ReadAudio(base::Bind(&OnReadDone,
1480                       base::TimeDelta::FromMilliseconds(0),
1481                       &audio_read_done_1));
1482  ReadVideo(base::Bind(&OnReadDone,
1483                       base::TimeDelta::FromMilliseconds(0),
1484                       &video_read_done_1));
1485
1486  end_of_stream_helper_1.RequestReads();
1487
1488  EXPECT_TRUE(audio_read_done_1);
1489  EXPECT_TRUE(video_read_done_1);
1490  end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1491
1492  EXPECT_CALL(host_, SetDuration(
1493      base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1494  MarkEndOfStream(PIPELINE_OK);
1495
1496  end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1497
1498  // Request a few more reads and make sure we immediately get
1499  // end of stream buffers.
1500  end_of_stream_helper_2.RequestReads();
1501  end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1502
1503  end_of_stream_helper_3.RequestReads();
1504  end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1505}
1506
1507TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1508  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1509
1510  AppendCluster(0, 10);
1511  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1512  MarkEndOfStream(PIPELINE_OK);
1513
1514  // Start the first seek.
1515  Seek(base::TimeDelta::FromMilliseconds(20));
1516
1517  // Simulate another seek being requested before the first
1518  // seek has finished prerolling.
1519  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1520  demuxer_->CancelPendingSeek(seek_time2);
1521
1522  // Finish second seek.
1523  Seek(seek_time2);
1524
1525  DemuxerStream::Status status;
1526  base::TimeDelta last_timestamp;
1527
1528  // Make sure audio can reach end of stream.
1529  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1530  ASSERT_EQ(status, DemuxerStream::kOk);
1531
1532  // Make sure video can reach end of stream.
1533  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1534  ASSERT_EQ(status, DemuxerStream::kOk);
1535}
1536
1537// Verify buffered range change behavior for audio/video/text tracks.
1538TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1539  DemuxerStream* text_stream = NULL;
1540
1541  EXPECT_CALL(host_, AddTextStream(_, _))
1542      .WillOnce(SaveArg<0>(&text_stream));
1543  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1544
1545  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30");
1546  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1547
1548  // Check expected ranges and verify that an empty text track does not
1549  // affect the expected ranges.
1550  CheckExpectedRanges(kSourceId, "{ [0,46) }");
1551
1552  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(60)));
1553  MarkEndOfStream(PIPELINE_OK);
1554
1555  // Check expected ranges and verify that an empty text track does not
1556  // affect the expected ranges.
1557  CheckExpectedRanges(kSourceId, "{ [0,60) }");
1558
1559  // Unmark end of stream state and verify that the ranges return to
1560  // their pre-"end of stream" values.
1561  demuxer_->UnmarkEndOfStream();
1562  CheckExpectedRanges(kSourceId, "{ [0,46) }");
1563
1564  // Add text track data and verify that the buffered ranges don't change
1565  // since the intersection of all the tracks doesn't change.
1566  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1567  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
1568  CheckExpectedRanges(kSourceId, "{ [0,46) }");
1569
1570  // Mark end of stream and verify that text track data is reflected in
1571  // the new range.
1572  MarkEndOfStream(PIPELINE_OK);
1573  CheckExpectedRanges(kSourceId, "{ [0,200) }");
1574}
1575
1576// Make sure AppendData() will accept elements that span multiple calls.
1577TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1578  EXPECT_CALL(*this, DemuxerOpened());
1579  demuxer_->Initialize(
1580      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1581
1582  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1583
1584  scoped_ptr<uint8[]> info_tracks;
1585  int info_tracks_size = 0;
1586  CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1587                    false, false, &info_tracks, &info_tracks_size);
1588
1589  scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1590  scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1591
1592  size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1593  scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1594  uint8* dst = buffer.get();
1595  memcpy(dst, info_tracks.get(), info_tracks_size);
1596  dst += info_tracks_size;
1597
1598  memcpy(dst, cluster_a->data(), cluster_a->size());
1599  dst += cluster_a->size();
1600
1601  memcpy(dst, cluster_b->data(), cluster_b->size());
1602  dst += cluster_b->size();
1603
1604  AppendDataInPieces(buffer.get(), buffer_size);
1605
1606  GenerateExpectedReads(0, 9);
1607}
1608
1609TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1610  struct BufferTimestamps buffer_timestamps[] = {
1611    {0, 0},
1612    {33, 3},
1613    {67, 6},
1614    {100, 9},
1615    {133, 12},
1616    {kSkip, kSkip},
1617  };
1618
1619  ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1620                            base::TimeDelta::FromMilliseconds(2744)));
1621}
1622
1623TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1624  struct BufferTimestamps buffer_timestamps[] = {
1625    {0, 0},
1626    {33, 3},
1627    {67, 6},
1628    {100, 9},
1629    {133, 12},
1630    {kSkip, kSkip},
1631  };
1632
1633  ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1634                            kInfiniteDuration()));
1635}
1636
1637TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
1638  struct BufferTimestamps buffer_timestamps[] = {
1639    {kSkip, 0},
1640    {kSkip, 3},
1641    {kSkip, 6},
1642    {kSkip, 9},
1643    {kSkip, 12},
1644    {kSkip, kSkip},
1645  };
1646
1647  ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1648                            base::TimeDelta::FromMilliseconds(2744),
1649                            HAS_AUDIO));
1650}
1651
1652TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
1653  struct BufferTimestamps buffer_timestamps[] = {
1654    {0, kSkip},
1655    {33, kSkip},
1656    {67, kSkip},
1657    {100, kSkip},
1658    {133, kSkip},
1659    {kSkip, kSkip},
1660  };
1661
1662  ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
1663                            base::TimeDelta::FromMilliseconds(2703),
1664                            HAS_VIDEO));
1665}
1666
1667TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
1668  struct BufferTimestamps buffer_timestamps[] = {
1669    {0, 0},
1670    {33, 3},
1671    {33, 6},
1672    {67, 9},
1673    {100, 12},
1674    {kSkip, kSkip},
1675  };
1676
1677  ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
1678                            base::TimeDelta::FromMilliseconds(2767)));
1679}
1680
1681// Verify that we output buffers before the entire cluster has been parsed.
1682TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
1683  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1684  AppendEmptyCluster(0);
1685
1686  scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
1687
1688  bool audio_read_done = false;
1689  bool video_read_done = false;
1690  ReadAudio(base::Bind(&OnReadDone,
1691                       base::TimeDelta::FromMilliseconds(0),
1692                       &audio_read_done));
1693  ReadVideo(base::Bind(&OnReadDone,
1694                       base::TimeDelta::FromMilliseconds(0),
1695                       &video_read_done));
1696
1697  // Make sure the reads haven't completed yet.
1698  EXPECT_FALSE(audio_read_done);
1699  EXPECT_FALSE(video_read_done);
1700
1701  // Append data one byte at a time until the audio read completes.
1702  int i = 0;
1703  for (; i < cluster->size() && !audio_read_done; ++i) {
1704    AppendData(cluster->data() + i, 1);
1705    message_loop_.RunUntilIdle();
1706  }
1707
1708  EXPECT_TRUE(audio_read_done);
1709  EXPECT_FALSE(video_read_done);
1710  EXPECT_GT(i, 0);
1711  EXPECT_LT(i, cluster->size());
1712
1713  // Append data one byte at a time until the video read completes.
1714  for (; i < cluster->size() && !video_read_done; ++i) {
1715    AppendData(cluster->data() + i, 1);
1716    message_loop_.RunUntilIdle();
1717  }
1718
1719  EXPECT_TRUE(video_read_done);
1720  EXPECT_LT(i, cluster->size());
1721
1722  audio_read_done = false;
1723  video_read_done = false;
1724  ReadAudio(base::Bind(&OnReadDone,
1725                       base::TimeDelta::FromMilliseconds(23),
1726                       &audio_read_done));
1727  ReadVideo(base::Bind(&OnReadDone,
1728                       base::TimeDelta::FromMilliseconds(33),
1729                       &video_read_done));
1730
1731  // Make sure the reads haven't completed yet.
1732  EXPECT_FALSE(audio_read_done);
1733  EXPECT_FALSE(video_read_done);
1734
1735  // Append the remaining data.
1736  ASSERT_LT(i, cluster->size());
1737  AppendData(cluster->data() + i, cluster->size() - i);
1738
1739  message_loop_.RunUntilIdle();
1740
1741  EXPECT_TRUE(audio_read_done);
1742  EXPECT_TRUE(video_read_done);
1743}
1744
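// Verify that a parse error during initialization runs the init done callback
// with DEMUXER_ERROR_COULD_NOT_OPEN.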
1745TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
1746  EXPECT_CALL(*this, DemuxerOpened());
1747  demuxer_->Initialize(
1748      &host_, CreateInitDoneCB(
1749          kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1750
1751  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1752
1753  uint8 tmp = 0;
1754  demuxer_->AppendData(kSourceId, &tmp, 1);
1755}
1756
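// Verify that appending an initialization segment containing both audio and
// video tracks to an audio-only source id signals an initialization error.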
1757TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
1758  EXPECT_CALL(*this, DemuxerOpened());
1759  demuxer_->Initialize(
1760      &host_, CreateInitDoneCB(kNoTimestamp(),
1761                               DEMUXER_ERROR_COULD_NOT_OPEN), true);
1762
1763  std::vector<std::string> codecs(1);
1764  codecs[0] = "vorbis";
1765  ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
1766            ChunkDemuxer::kOk);
1767
1768  AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1769}
1770
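// Verify that appending an initialization segment containing both audio and
// video tracks to a video-only source id signals an initialization error.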
1771TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
1772  EXPECT_CALL(*this, DemuxerOpened());
1773  demuxer_->Initialize(
1774      &host_, CreateInitDoneCB(kNoTimestamp(),
1775                               DEMUXER_ERROR_COULD_NOT_OPEN), true);
1776
1777  std::vector<std::string> codecs(1);
1778  codecs[0] = "vp8";
1779  ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
1780            ChunkDemuxer::kOk);
1781
1782  AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1783}
1784
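// Verify that appending a second, identical initialization segment between
// media segments is accepted and does not disrupt reads.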
1785TEST_F(ChunkDemuxerTest, MultipleHeaders) {
1786  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1787
1788  AppendCluster(kDefaultFirstCluster());
1789
1790  // Append another identical initialization segment.
1791  AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1792
1793  AppendCluster(kDefaultSecondCluster());
1794
1795  GenerateExpectedReads(0, 9);
1796}
1797
1798TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
1799  std::string audio_id = "audio1";
1800  std::string video_id = "video1";
1801  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1802
1803  // Append audio and video data into separate source ids.
1804  AppendCluster(audio_id,
1805      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1806  GenerateAudioStreamExpectedReads(0, 4);
1807  AppendCluster(video_id,
1808      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1809  GenerateVideoStreamExpectedReads(0, 4);
1810}
1811
1812TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
1813  // TODO(matthewjheaney): Here and elsewhere, we need more tests
1814  // for inband text tracks (http://crbug/321455).
1815
1816  std::string audio_id = "audio1";
1817  std::string video_id = "video1";
1818
1819  EXPECT_CALL(host_, AddTextStream(_, _))
1820    .Times(Exactly(2));
1821  ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
1822
1823  // Append audio and video data into separate source ids.
1824  AppendCluster(audio_id,
1825      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1826  GenerateAudioStreamExpectedReads(0, 4);
1827  AppendCluster(video_id,
1828      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1829  GenerateVideoStreamExpectedReads(0, 4);
1830}
1831
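// Verify that AddId() returns kReachedIdLimit when no more source ids can be
// accepted.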
1832TEST_F(ChunkDemuxerTest, AddIdFailures) {
1833  EXPECT_CALL(*this, DemuxerOpened());
1834  demuxer_->Initialize(
1835      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1836
1837  std::string audio_id = "audio1";
1838  std::string video_id = "video1";
1839
1840  ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
1841
1842  // Adding an id with audio/video should fail because we already added audio.
1843  ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
1844
1845  AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
1846
1847  // Adding an id after append should fail.
1848  ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
1849}
1850
1851// Test that Read() calls after a RemoveId() return "end of stream" buffers.
1852TEST_F(ChunkDemuxerTest, RemoveId) {
1853  std::string audio_id = "audio1";
1854  std::string video_id = "video1";
1855  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1856
1857  // Append audio and video data into separate source ids.
1858  AppendCluster(audio_id,
1859      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1860  AppendCluster(video_id,
1861      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1862
1863  // Read() from audio should return normal buffers.
1864  GenerateAudioStreamExpectedReads(0, 4);
1865
1866  // Remove the audio id.
1867  demuxer_->RemoveId(audio_id);
1868
1869  // Read() from audio should return "end of stream" buffers.
1870  bool audio_read_done = false;
1871  ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1872  message_loop_.RunUntilIdle();
1873  EXPECT_TRUE(audio_read_done);
1874
1875  // Read() from video should still return normal buffers.
1876  GenerateVideoStreamExpectedReads(0, 4);
1877}
1878
1879// Test that removing an ID immediately after adding it does not interfere with
1880// quota for new IDs in the future.
1881TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
1882  std::string audio_id_1 = "audio1";
1883  ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
1884  demuxer_->RemoveId(audio_id_1);
1885
1886  std::string audio_id_2 = "audio2";
1887  ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
1888}
1889
1890TEST_F(ChunkDemuxerTest, SeekCanceled) {
1891  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1892
1893  // Append cluster at the beginning of the stream.
1894  AppendCluster(GenerateCluster(0, 4));
1895
1896  // Seek to an unbuffered region.
1897  Seek(base::TimeDelta::FromSeconds(50));
1898
1899  // Attempt to read in unbuffered area; should not fulfill the read.
1900  bool audio_read_done = false;
1901  bool video_read_done = false;
1902  ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
1903  ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
1904  EXPECT_FALSE(audio_read_done);
1905  EXPECT_FALSE(video_read_done);
1906
1907  // Now cancel the pending seek, which should flush the reads with empty
1908  // buffers.
1909  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
1910  demuxer_->CancelPendingSeek(seek_time);
1911  message_loop_.RunUntilIdle();
1912  EXPECT_TRUE(audio_read_done);
1913  EXPECT_TRUE(video_read_done);
1914
1915  // A seek back to the buffered region should succeed.
1916  Seek(seek_time);
1917  GenerateExpectedReads(0, 4);
1918}
1919
1920TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
1921  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1922
1923  // Append cluster at the beginning of the stream.
1924  AppendCluster(GenerateCluster(0, 4));
1925
1926  // Start waiting for a seek.
1927  base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
1928  base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
1929  demuxer_->StartWaitingForSeek(seek_time1);
1930
1931  // Now cancel the upcoming seek to an unbuffered region.
1932  demuxer_->CancelPendingSeek(seek_time2);
1933  demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
1934
1935  // Read requests should be fulfilled with empty buffers.
1936  bool audio_read_done = false;
1937  bool video_read_done = false;
1938  ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
1939  ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
1940  EXPECT_TRUE(audio_read_done);
1941  EXPECT_TRUE(video_read_done);
1942
1943  // A seek back to the buffered region should succeed.
1944  Seek(seek_time2);
1945  GenerateExpectedReads(0, 4);
1946}
1947
1948// Test that Seek() successfully seeks to all source IDs.
1949TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
1950  std::string audio_id = "audio1";
1951  std::string video_id = "video1";
1952  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1953
1954  AppendCluster(
1955      audio_id,
1956      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1957  AppendCluster(
1958      video_id,
1959      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1960
1961  // Read() should return buffers at 0.
1962  bool audio_read_done = false;
1963  bool video_read_done = false;
1964  ReadAudio(base::Bind(&OnReadDone,
1965                       base::TimeDelta::FromMilliseconds(0),
1966                       &audio_read_done));
1967  ReadVideo(base::Bind(&OnReadDone,
1968                       base::TimeDelta::FromMilliseconds(0),
1969                       &video_read_done));
1970  EXPECT_TRUE(audio_read_done);
1971  EXPECT_TRUE(video_read_done);
1972
1973  // Seek to 3 (an unbuffered region).
1974  Seek(base::TimeDelta::FromSeconds(3));
1975
1976  audio_read_done = false;
1977  video_read_done = false;
1978  ReadAudio(base::Bind(&OnReadDone,
1979                       base::TimeDelta::FromSeconds(3),
1980                       &audio_read_done));
1981  ReadVideo(base::Bind(&OnReadDone,
1982                       base::TimeDelta::FromSeconds(3),
1983                       &video_read_done));
1984  // Read()s should not return until after data is appended at the Seek point.
1985  EXPECT_FALSE(audio_read_done);
1986  EXPECT_FALSE(video_read_done);
1987
1988  AppendCluster(audio_id,
1989                GenerateSingleStreamCluster(
1990                    3000, 3092, kAudioTrackNum, kAudioBlockDuration));
1991  AppendCluster(video_id,
1992                GenerateSingleStreamCluster(
1993                    3000, 3132, kVideoTrackNum, kVideoBlockDuration));
1994
1995  message_loop_.RunUntilIdle();
1996
1997  // Read() should return buffers at 3.
1998  EXPECT_TRUE(audio_read_done);
1999  EXPECT_TRUE(video_read_done);
2000}
2001
2002// Test that Seek() completes successfully when EndOfStream
2003// is called before data is available for that seek point.
2004// This scenario can occur when seeking past the end of stream
2005// of either audio or video (or both).
2006TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2007  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2008
2009  AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2010  AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2011
2012  // Seeking past the end of video.
2013  // Note: audio data is available for that seek point.
2014  bool seek_cb_was_called = false;
2015  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2016  demuxer_->StartWaitingForSeek(seek_time);
2017  demuxer_->Seek(seek_time,
2018                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2019  message_loop_.RunUntilIdle();
2020
2021  EXPECT_FALSE(seek_cb_was_called);
2022
2023  EXPECT_CALL(host_, SetDuration(
2024      base::TimeDelta::FromMilliseconds(120)));
2025  MarkEndOfStream(PIPELINE_OK);
2026  message_loop_.RunUntilIdle();
2027
2028  EXPECT_TRUE(seek_cb_was_called);
2029
2030  ShutdownDemuxer();
2031}
2032
2033// Test that EndOfStream is ignored if coming during a pending seek
2034// whose seek time is before some existing ranges.
2035TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2036  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2037
2038  AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2039  AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2040  AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2041  AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2042
2043  bool seek_cb_was_called = false;
2044  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2045  demuxer_->StartWaitingForSeek(seek_time);
2046  demuxer_->Seek(seek_time,
2047                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2048  message_loop_.RunUntilIdle();
2049
2050  EXPECT_FALSE(seek_cb_was_called);
2051
2052  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2053  MarkEndOfStream(PIPELINE_OK);
2054  message_loop_.RunUntilIdle();
2055
2056  EXPECT_FALSE(seek_cb_was_called);
2057
2058  demuxer_->UnmarkEndOfStream();
2059
2060  AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2061  AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2062
2063  message_loop_.RunUntilIdle();
2064
2065  EXPECT_TRUE(seek_cb_was_called);
2066
2067  ShutdownDemuxer();
2068}
2069
2070// Test ranges in an audio-only stream.
2071TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2072  EXPECT_CALL(*this, DemuxerOpened());
2073  demuxer_->Initialize(
2074      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2075
2076  ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2077  AppendInitSegment(HAS_AUDIO);
2078
2079  // Test a simple cluster.
2080  AppendCluster(
2081      GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2082
2083  CheckExpectedRanges("{ [0,92) }");
2084
2085  // Append a disjoint cluster to check for two separate ranges.
2086  AppendCluster(GenerateSingleStreamCluster(
2087      150, 219, kAudioTrackNum, kAudioBlockDuration));
2088
2089  CheckExpectedRanges("{ [0,92) [150,219) }");
2090}
2091
2092// Test ranges in a video-only stream.
2093TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2094  EXPECT_CALL(*this, DemuxerOpened());
2095  demuxer_->Initialize(
2096      &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2097
2098  ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2099  AppendInitSegment(HAS_VIDEO);
2100
2101  // Test a simple cluster.
2102  AppendCluster(
2103      GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2104
2105  CheckExpectedRanges("{ [0,132) }");
2106
2107  // Append a disjoint cluster to check for two separate ranges.
2108  AppendCluster(GenerateSingleStreamCluster(
2109      200, 299, kVideoTrackNum, kVideoBlockDuration));
2110
2111  CheckExpectedRanges("{ [0,132) [200,299) }");
2112}
2113
2114TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2115  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2116
2117  // Audio: 0 -> 23
2118  // Video: 0 -> 33
2119  // Buffered Range: 0 -> 23
2120  // Audio block duration is smaller than video block duration,
2121  // so the buffered ranges should correspond to the audio blocks.
2122  AppendCluster(GenerateSingleStreamCluster(
2123      0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2124  AppendCluster(GenerateSingleStreamCluster(
2125      0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2126
2127  CheckExpectedRanges("{ [0,23) }");
2128
2129  // Audio: 300 -> 400
2130  // Video: 320 -> 420
2131  // Buffered Range: 320 -> 400  (end overlap)
2132  AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2133  AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2134
2135  CheckExpectedRanges("{ [0,23) [320,400) }");
2136
2137  // Audio: 520 -> 590
2138  // Video: 500 -> 570
2139  // Buffered Range: 520 -> 570  (front overlap)
2140  AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2141  AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2142
2143  CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2144
2145  // Audio: 720 -> 750
2146  // Video: 700 -> 770
2147  // Buffered Range: 720 -> 750  (complete overlap, audio)
2148  AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2149  AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2150
2151  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2152
2153  // Audio: 900 -> 970
2154  // Video: 920 -> 950
2155  // Buffered Range: 920 -> 950  (complete overlap, video)
2156  AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2157  AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2158
2159  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2160
2161  // Appending within buffered range should not affect buffered ranges.
2162  AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2163  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2164
2165  // Appending to single stream outside buffered ranges should not affect
2166  // buffered ranges.
2167  AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2168  CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2169}
2170
2171TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2172  EXPECT_CALL(host_, AddTextStream(_, _));
2173  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2174
2175  // Append audio & video data
2176  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
2177  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2178
2179  // Verify that a text track with no cues does not result in an empty buffered
2180  // range.
2181  CheckExpectedRanges("{ [0,46) }");
2182
2183  // Add some text cues.
2184  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
2185
2186  // Verify that the new cues did not affect the buffered ranges.
2187  CheckExpectedRanges("{ [0,46) }");
2188
2189  // Remove the buffered range.
2190  demuxer_->Remove(kSourceId, base::TimeDelta(),
2191                   base::TimeDelta::FromMilliseconds(46));
2192  CheckExpectedRanges("{ }");
2193}
2194
2195// Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2196// over-hanging tails at the end of the ranges as this is likely due to block
2197// duration differences.
2198TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2199  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2200
2201  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
2202  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2203
2204  CheckExpectedRanges("{ [0,46) }");
2205
2206  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2207  MarkEndOfStream(PIPELINE_OK);
2208
2209  // Verify that the range extends to the end of the video data.
2210  CheckExpectedRanges("{ [0,66) }");
2211
2212  // Verify that the range reverts to the intersection when end of stream
2213  // has been cancelled.
2214  demuxer_->UnmarkEndOfStream();
2215  CheckExpectedRanges("{ [0,46) }");
2216
2217  // Append and remove data so that the 2 streams' end ranges do not overlap.
2218
2219  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
2220  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(366)));
2221  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2222  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2223                            "200K 233 266 299 300K 333");
2224
2225  // At this point, the per-stream ranges are as follows:
2226  // Audio: [0,46) [200,246)
2227  // Video: [0,66) [200,366)
2228  CheckExpectedRanges("{ [0,46) [200,246) }");
2229
2230  demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2231                   base::TimeDelta::FromMilliseconds(300));
2232
2233  // At this point, the per-stream ranges are as follows:
2234  // Audio: [0,46)
2235  // Video: [0,66) [300,366)
2236  CheckExpectedRanges("{ [0,46) }");
2237
2238  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2239  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
2240
2241  // At this point, the per-stream ranges are as follows:
2242  // Audio: [0,46) [200,246)
2243  // Video: [0,66) [200,266) [300,366)
2244  // NOTE: The last ranges of the two streams do not overlap in time.
2245  CheckExpectedRanges("{ [0,46) [200,246) }");
2246
2247  MarkEndOfStream(PIPELINE_OK);
2248
2249  // NOTE: The last range on each stream gets extended to the highest
2250  // end timestamp according to the spec. The last audio range gets extended
2251  // from [200,246) to [200,366) which is why the intersection results in the
2252  // middle range getting larger AND the new range appearing.
2253  CheckExpectedRanges("{ [0,46) [200,266) [300,366) }");
2254}
2255
2256TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2257  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2258
2259  // Create a cluster where the video timecode begins 25ms after the audio.
2260  AppendCluster(GenerateCluster(0, 25, 8));
2261
2262  Seek(base::TimeDelta::FromSeconds(0));
2263  GenerateExpectedReads(0, 25, 8);
2264
2265  // Seek to 5 seconds.
2266  Seek(base::TimeDelta::FromSeconds(5));
2267
2268  // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2269  // after the video.
2270  AppendCluster(GenerateCluster(5025, 5000, 8));
2271  GenerateExpectedReads(5025, 5000, 8);
2272}
2273
2274TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2275  std::string audio_id = "audio1";
2276  std::string video_id = "video1";
2277  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2278
2279  // Generate two streams where the video stream starts 5ms after the audio
2280  // stream and append them.
2281  AppendCluster(audio_id, GenerateSingleStreamCluster(
2282      25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2283  AppendCluster(video_id, GenerateSingleStreamCluster(
2284      30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2285
2286  // Both streams should be able to fulfill a seek to 25.
2287  Seek(base::TimeDelta::FromMilliseconds(25));
2288  GenerateAudioStreamExpectedReads(25, 4);
2289  GenerateVideoStreamExpectedReads(30, 4);
2290}
2291
2292TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2293  std::string audio_id = "audio1";
2294  std::string video_id = "video1";
2295  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2296
2297  // Generate two streams where the video stream starts 10s after the audio
2298  // stream and append them.
2299  AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2300      4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2301  AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2302      4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2303
2304  // Should not be able to fulfill a seek to 0.
2305  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2306  demuxer_->StartWaitingForSeek(seek_time);
2307  demuxer_->Seek(seek_time,
2308                 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2309  ExpectRead(DemuxerStream::AUDIO, 0);
2310  ExpectEndOfStream(DemuxerStream::VIDEO);
2311}
2312
2313TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2314  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2315
2316  // Generate and append an empty cluster beginning at 0.
2317  AppendEmptyCluster(0);
2318
2319  // Sanity check that data can be appended after this cluster correctly.
2320  AppendCluster(GenerateCluster(0, 2));
2321  ExpectRead(DemuxerStream::AUDIO, 0);
2322  ExpectRead(DemuxerStream::VIDEO, 0);
2323}
2324
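// Verify that a codec string with a profile suffix (e.g. "avc1.4D4041") is
// matched by its prefix and is only accepted when proprietary codecs are
// enabled.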
2325TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2326  ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2327
2328#if defined(USE_PROPRIETARY_CODECS)
2329  expected = ChunkDemuxer::kOk;
2330#endif
2331
2332  std::vector<std::string> codecs;
2333  codecs.push_back("avc1.4D4041");
2334
2335  EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2336}
2337
2338// Test codec ID's that are not compliant with RFC6381, but have been
2339// seen in the wild.
2340TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2341  ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2342
2343#if defined(USE_PROPRIETARY_CODECS)
2344  expected = ChunkDemuxer::kOk;
2345#endif
2346  const char* codec_ids[] = {
2347    // GPAC places leading zeros on the audio object type.
2348    "mp4a.40.02",
2349    "mp4a.40.05"
2350  };
2351
2352  for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2353    std::vector<std::string> codecs;
2354    codecs.push_back(codec_ids[i]);
2355
2356    ChunkDemuxer::Status result =
2357        demuxer_->AddId("source_id", "audio/mp4", codecs);
2358
2359    EXPECT_EQ(result, expected)
2360        << "Fail to add codec_id '" << codec_ids[i] << "'";
2361
2362    if (result == ChunkDemuxer::kOk)
2363      demuxer_->RemoveId("source_id");
2364  }
2365}
2366
2367TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2368  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2369
2370  EXPECT_CALL(host_, SetDuration(_))
2371      .Times(AnyNumber());
2372
2373  base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2374  base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2375
2376  AppendCluster(kDefaultFirstCluster());
2377  AppendCluster(kDefaultSecondCluster());
2378  MarkEndOfStream(PIPELINE_OK);
2379
2380  DemuxerStream::Status status;
2381  base::TimeDelta last_timestamp;
2382
2383  // Verify that we can read audio & video to the end w/o problems.
2384  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2385  EXPECT_EQ(DemuxerStream::kOk, status);
2386  EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2387
2388  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2389  EXPECT_EQ(DemuxerStream::kOk, status);
2390  EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2391
2392  // Seek back to 0 and verify that we can read to the end again.
2393  Seek(base::TimeDelta::FromMilliseconds(0));
2394
2395  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2396  EXPECT_EQ(DemuxerStream::kOk, status);
2397  EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2398
2399  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2400  EXPECT_EQ(DemuxerStream::kOk, status);
2401  EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2402}
2403
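// Verify that buffered ranges are empty for each source id before an
// initialization segment has been appended.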
2404TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2405  EXPECT_CALL(*this, DemuxerOpened());
2406  demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2407  ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2408  ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2409
2410  CheckExpectedRanges("audio", "{ }");
2411  CheckExpectedRanges("video", "{ }");
2412}
2413
2414// Test that Seek() completes successfully when the first cluster
2415// arrives.
2416TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2417  InSequence s;
2418
2419  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2420
2421  AppendCluster(kDefaultFirstCluster());
2422
2423  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2424  demuxer_->StartWaitingForSeek(seek_time);
2425
2426  AppendCluster(kDefaultSecondCluster());
2427  EXPECT_CALL(host_, SetDuration(
2428      base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2429  MarkEndOfStream(PIPELINE_OK);
2430
2431  demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2432
2433  GenerateExpectedReads(0, 4);
2434  GenerateExpectedReads(46, 66, 5);
2435
2436  EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2437  end_of_stream_helper.RequestReads();
2438  end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2439}
2440
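// Verify that video config changes are reported via kConfigChanged and that
// the updated VideoDecoderConfig can be retrieved from the stream.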
2441TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2442  InSequence s;
2443
2444  ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2445
2446  DemuxerStream::Status status;
2447  base::TimeDelta last_timestamp;
2448
2449  DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2450
2451  // Fetch initial video config and verify it matches what we expect.
2452  const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2453  ASSERT_TRUE(video_config_1.IsValidConfig());
2454  EXPECT_EQ(video_config_1.natural_size().width(), 320);
2455  EXPECT_EQ(video_config_1.natural_size().height(), 240);
2456
2457  ExpectRead(DemuxerStream::VIDEO, 0);
2458
2459  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2460
2461  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2462  EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2463
2464  // Fetch the new decoder config.
2465  const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2466  ASSERT_TRUE(video_config_2.IsValidConfig());
2467  EXPECT_EQ(video_config_2.natural_size().width(), 640);
2468  EXPECT_EQ(video_config_2.natural_size().height(), 360);
2469
2470  ExpectRead(DemuxerStream::VIDEO, 527);
2471
2472  // Read until the next config change.
2473  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2474  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2475  EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2476
2477  // Get the new config and verify that it matches the first one.
2478  ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2479
2480  ExpectRead(DemuxerStream::VIDEO, 801);
2481
2482  // Read until the end of the stream just to make sure there aren't any other
2483  // config changes.
2484  ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2485  ASSERT_EQ(status, DemuxerStream::kOk);
2486}
2487
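// Verify that audio config changes are reported via kConfigChanged and that
// the updated AudioDecoderConfig can be retrieved from the stream.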
2488TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2489  InSequence s;
2490
2491  ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2492
2493  DemuxerStream::Status status;
2494  base::TimeDelta last_timestamp;
2495
2496  DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2497
2498  // Fetch initial audio config and verify it matches what we expect.
2499  const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2500  ASSERT_TRUE(audio_config_1.IsValidConfig());
2501  EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2502  EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2503
2504  ExpectRead(DemuxerStream::AUDIO, 0);
2505
2506  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2507
2508  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2509  EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2510
2511  // Fetch the new decoder config.
2512  const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2513  ASSERT_TRUE(audio_config_2.IsValidConfig());
2514  EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2515  EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2516
2517  ExpectRead(DemuxerStream::AUDIO, 527);
2518
2519  // Read until the next config change.
2520  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2521  ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2522  EXPECT_EQ(last_timestamp.InMilliseconds(), 759);
2523
2524  // Get the new config and verify that it matches the first one.
2525  ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2526
2527  ExpectRead(DemuxerStream::AUDIO, 779);
2528
2529  // Read until the end of the stream just to make sure there aren't any other
2530  // config changes.
2531  ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2532  ASSERT_EQ(status, DemuxerStream::kOk);
2533}
2534
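// Verify that seeking into a region with a different config signals a config
// change, while seeking between regions with matching configs does not.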
2535TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2536  InSequence s;
2537
2538  ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2539
2540  DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2541
2542  // Fetch initial video config and verify it matches what we expect.
2543  const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2544  ASSERT_TRUE(video_config_1.IsValidConfig());
2545  EXPECT_EQ(video_config_1.natural_size().width(), 320);
2546  EXPECT_EQ(video_config_1.natural_size().height(), 240);
2547
2548  ExpectRead(DemuxerStream::VIDEO, 0);
2549
2550  // Seek to a location with a different config.
2551  Seek(base::TimeDelta::FromMilliseconds(527));
2552
2553  // Verify that the config change is signalled.
2554  ExpectConfigChanged(DemuxerStream::VIDEO);
2555
2556  // Fetch the new decoder config and verify it is what we expect.
2557  const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2558  ASSERT_TRUE(video_config_2.IsValidConfig());
2559  EXPECT_EQ(video_config_2.natural_size().width(), 640);
2560  EXPECT_EQ(video_config_2.natural_size().height(), 360);
2561
2562  // Verify that Read() will return a buffer now.
2563  ExpectRead(DemuxerStream::VIDEO, 527);
2564
2565  // Seek back to the beginning and verify we get another config change.
2566  Seek(base::TimeDelta::FromMilliseconds(0));
2567  ExpectConfigChanged(DemuxerStream::VIDEO);
2568  ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2569  ExpectRead(DemuxerStream::VIDEO, 0);
2570
2571  // Seek to a location that requires a config change and then
2572  // seek to a new location that has the same configuration as
2573  // the start of the file without a Read() in the middle.
2574  Seek(base::TimeDelta::FromMilliseconds(527));
2575  Seek(base::TimeDelta::FromMilliseconds(801));
2576
2577  // Verify that no config change is signalled.
2578  ExpectRead(DemuxerStream::VIDEO, 801);
2579  ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2580}
2581
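// Verify that a positive timestamp offset shifts appended data forward in
// time.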
2582TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2583  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2584
2585  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2586      kSourceId, base::TimeDelta::FromSeconds(30)));
2587  AppendCluster(GenerateCluster(0, 2));
2588
2589  Seek(base::TimeDelta::FromMilliseconds(30000));
2590
2591  GenerateExpectedReads(30000, 2);
2592}
2593
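// Verify that a negative timestamp offset shifts appended data back toward
// zero.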
2594TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2595  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2596
2597  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2598      kSourceId, base::TimeDelta::FromSeconds(-1)));
2599  AppendCluster(GenerateCluster(1000, 2));
2600
2601  GenerateExpectedReads(0, 2);
2602}
2603
2604TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2605  std::string audio_id = "audio1";
2606  std::string video_id = "video1";
2607  ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2608
2609  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2610      audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2611  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2612      video_id, base::TimeDelta::FromMilliseconds(-2500)));
2613  AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2614      2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2615  AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2616      2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2617  GenerateAudioStreamExpectedReads(0, 4);
2618  GenerateVideoStreamExpectedReads(0, 4);
2619
2620  Seek(base::TimeDelta::FromMilliseconds(27300));
2621
2622  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2623      audio_id, base::TimeDelta::FromMilliseconds(27300)));
2624  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2625      video_id, base::TimeDelta::FromMilliseconds(27300)));
2626  AppendCluster(audio_id, GenerateSingleStreamCluster(
2627      0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2628  AppendCluster(video_id, GenerateSingleStreamCluster(
2629      0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2630  GenerateVideoStreamExpectedReads(27300, 4);
2631  GenerateAudioStreamExpectedReads(27300, 4);
2632}
2633
2634TEST_F(ChunkDemuxerTest, TimestampOffsetMidMediaSegment) {
2635  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2636
2637  scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2638  // Append only part of the cluster data.
2639  AppendData(cluster->data(), cluster->size() - 13);
2640
2641  // Setting a timestamp should fail because we're in the middle of a cluster.
2642  ASSERT_FALSE(demuxer_->SetTimestampOffset(
2643      kSourceId, base::TimeDelta::FromSeconds(25)));
2644
2645  demuxer_->Abort(kSourceId);
2646  // After Abort(), setting a timestamp should succeed since we're no longer
2647  // in the middle of a cluster.
2648  ASSERT_TRUE(demuxer_->SetTimestampOffset(
2649      kSourceId, base::TimeDelta::FromSeconds(25)));
2650}
2651
2652TEST_F(ChunkDemuxerTest, WebMParsingMediaSegmentDetection) {
2653  // TODO(wolenetz): Also test 'unknown' sized clusters.
2654  // See http://crbug.com/335676.
2655  const uint8 kBuffer[] = {
2656    0x1F, 0x43, 0xB6, 0x75, 0x83,  // CLUSTER (size = 3)
2657    0xE7, 0x81, 0x01,              // Cluster TIMECODE (value = 1)
2658  };
2659
2660  // Setting timestamp offset or append mode is allowed only while not
2661  // parsing a media segment. This array indicates whether or not these
2662  // operations are allowed following each incrementally appended byte in
2663  // |kBuffer|.
2664  const bool kExpectedReturnValues[] = {
2665    true, true, true, true, false,
2666    false, false, true,
2667  };
2668
2669  COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
2670      test_arrays_out_of_sync);
2671  COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
2672
2673  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2674
2675  for (size_t i = 0; i < sizeof(kBuffer); i++) {
2676    DVLOG(3) << "Appending and testing index " << i;
2677    AppendData(kBuffer + i, 1);
2678    bool expected_return_value = kExpectedReturnValues[i];
2679    EXPECT_EQ(expected_return_value, demuxer_->SetTimestampOffset(
2680        kSourceId, base::TimeDelta::FromSeconds(25)));
2681    EXPECT_EQ(expected_return_value, demuxer_->SetSequenceMode(
2682        kSourceId, true));
2683    EXPECT_EQ(expected_return_value, demuxer_->SetSequenceMode(
2684        kSourceId, false));
2685  }
2686}
2687
2688TEST_F(ChunkDemuxerTest, SetSequenceModeMidMediaSegment) {
2689  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2690
2691  scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2692  // Append only part of the cluster data.
2693  AppendData(cluster->data(), cluster->size() - 13);
2694
2695  // Setting append mode should fail because we're in the middle of a cluster.
2696  ASSERT_FALSE(demuxer_->SetSequenceMode(kSourceId, true));
2697  ASSERT_FALSE(demuxer_->SetSequenceMode(kSourceId, false));
2698
2699  demuxer_->Abort(kSourceId);
2700  // After Abort(), setting append mode should succeed since we're no longer
2701  // in the middle of a cluster.
2702  ASSERT_TRUE(demuxer_->SetSequenceMode(kSourceId, true));
2703  ASSERT_TRUE(demuxer_->SetSequenceMode(kSourceId, false));
2704}
2705
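// Verify that SetDuration() is only signalled when appended data extends
// beyond the currently set duration.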
2706TEST_F(ChunkDemuxerTest, DurationChange) {
2707  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2708  const int kStreamDuration = kDefaultDuration().InMilliseconds();
2709
2710  // Add data leading up to the currently set duration.
2711  AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
2712                                kStreamDuration - kVideoBlockDuration,
2713                                2));
2714
2715  CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
2716
2717  // Add data at the currently set duration. The duration should not increase.
2718  AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
2719
2720  // Range should not be affected.
2721  CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
2722
2723  // Now add data past the duration and expect a new duration to be signalled.
2724  const int kNewStreamDuration = kStreamDuration + kAudioBlockDuration * 2;
2725  EXPECT_CALL(host_, SetDuration(
2726      base::TimeDelta::FromMilliseconds(kNewStreamDuration)));
2727  AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
2728                                kStreamDuration + kVideoBlockDuration,
2729                                2));
2730
2731  // See that the range has increased appropriately.
2732  CheckExpectedRanges(kSourceId, "{ [201191,201270) }");
2733}
2734
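// Verify that a duration change is signalled when a timestamp offset pushes
// appended data past the current duration.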
2735TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
2736  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2737
2738  ASSERT_TRUE(demuxer_->SetTimestampOffset(kSourceId, kDefaultDuration()));
2739
2740  EXPECT_CALL(host_, SetDuration(
2741      kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2742          kAudioBlockDuration * 2)));
2743  AppendCluster(GenerateCluster(0, 4));
2744}
2745
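// Verify that MarkEndOfStream() truncates the duration to the end of the
// buffered data.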
2746TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
2747  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2748
2749  AppendCluster(kDefaultFirstCluster());
2750
2751  EXPECT_CALL(host_, SetDuration(
2752      base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
2753  MarkEndOfStream(PIPELINE_OK);
2754}
2755
2756
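// Verify that a zero length AppendData() call is handled gracefully.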
2757TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
2758  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2759  AppendData(NULL, 0);
2760}
2761
2762TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
2763  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2764
2765  EXPECT_CALL(host_, SetDuration(_))
2766      .Times(AnyNumber());
2767
2768  AppendCluster(kDefaultFirstCluster());
2769  MarkEndOfStream(PIPELINE_OK);
2770
2771  demuxer_->UnmarkEndOfStream();
2772
2773  AppendCluster(kDefaultSecondCluster());
2774  MarkEndOfStream(PIPELINE_OK);
2775}
2776
2777// Test receiving a Shutdown() call before we get an Initialize()
2778// call. This can happen if video element gets destroyed before
2779// the pipeline has a chance to initialize the demuxer.
2780TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
2781  demuxer_->Shutdown();
2782  demuxer_->Initialize(
2783      &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
2784  message_loop_.RunUntilIdle();
2785}
2786
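// Verify that a previously acquired audio stream returns end of stream
// buffers after the audio renderer has been disabled.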
2787TEST_F(ChunkDemuxerTest, ReadAfterAudioDisabled) {
2788  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2789  AppendCluster(kDefaultFirstCluster());
2790
2791  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
2792  ASSERT_TRUE(stream);
2793
2794  // The stream should no longer be present.
2795  demuxer_->OnAudioRendererDisabled();
2796  ASSERT_FALSE(demuxer_->GetStream(DemuxerStream::AUDIO));
2797
2798  // Normally this would return an audio buffer at timestamp zero, but
2799  // all reads should return EOS buffers when disabled.
2800  bool audio_read_done = false;
2801  stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2802  message_loop_.RunUntilIdle();
2803
2804  EXPECT_TRUE(audio_read_done);
2805}
2806
2807// Verifies that signalling end of stream while stalled at a gap
2808// boundary does not trigger end of stream buffers to be returned.
2809TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
2810  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2811
2812  AppendCluster(0, 10);
2813  AppendCluster(300, 10);
2814  CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
2815
2816
2817  GenerateExpectedReads(0, 10);
2818
2819  bool audio_read_done = false;
2820  bool video_read_done = false;
2821  ReadAudio(base::Bind(&OnReadDone,
2822                       base::TimeDelta::FromMilliseconds(138),
2823                       &audio_read_done));
2824  ReadVideo(base::Bind(&OnReadDone,
2825                       base::TimeDelta::FromMilliseconds(138),
2826                       &video_read_done));
2827
2828  // Verify that the reads didn't complete.
2829  EXPECT_FALSE(audio_read_done);
2830  EXPECT_FALSE(video_read_done);
2831
2832  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
2833  MarkEndOfStream(PIPELINE_OK);
2834
2835  // Verify that the reads still haven't completed.
2836  EXPECT_FALSE(audio_read_done);
2837  EXPECT_FALSE(video_read_done);
2838
2839  demuxer_->UnmarkEndOfStream();
2840
2841  AppendCluster(138, 24);
2842
2843  message_loop_.RunUntilIdle();
2844
2845  CheckExpectedRanges(kSourceId, "{ [0,438) }");
2846
2847  // Verify that the reads have completed.
2848  EXPECT_TRUE(audio_read_done);
2849  EXPECT_TRUE(video_read_done);
2850
2851  // Read the rest of the buffers.
2852  GenerateExpectedReads(161, 171, 22);
2853
2854  // Verify that reads block because the append cleared the end of stream state.
2855  audio_read_done = false;
2856  video_read_done = false;
2857  ReadAudio(base::Bind(&OnReadDone_EOSExpected,
2858                       &audio_read_done));
2859  ReadVideo(base::Bind(&OnReadDone_EOSExpected,
2860                       &video_read_done));
2861
2862  // Verify that the reads don't complete.
2863  EXPECT_FALSE(audio_read_done);
2864  EXPECT_FALSE(video_read_done);
2865
2866  MarkEndOfStream(PIPELINE_OK);
2867
2868  EXPECT_TRUE(audio_read_done);
2869  EXPECT_TRUE(video_read_done);
2870}
2871
2872TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
2873  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2874
2875  // Cancel preroll.
2876  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
2877  demuxer_->CancelPendingSeek(seek_time);
2878
2879  // Initiate the seek to the new location.
2880  Seek(seek_time);
2881
2882  // Append data to satisfy the seek.
2883  AppendCluster(seek_time.InMilliseconds(), 10);
2884}
2885
2886TEST_F(ChunkDemuxerTest, GCDuringSeek) {
2887  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
2888
2889  demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
2890
2891  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
2892  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
2893
2894  // Initiate a seek to |seek_time1|.
2895  Seek(seek_time1);
2896
2897  // Append data to satisfy the first seek request.
2898  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
2899                            seek_time1.InMilliseconds(), 5);
2900  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
2901
2902  // Signal that the second seek is starting.
2903  demuxer_->StartWaitingForSeek(seek_time2);
2904
2905  // Append data to satisfy the second seek. This append triggers
2906  // the garbage collection logic since we set the memory limit to
2907  // 5 blocks.
2908  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
2909                            seek_time2.InMilliseconds(), 5);
2910
2911  // Verify that the buffers that cover |seek_time2| do not get
2912  // garbage collected.
2913  CheckExpectedRanges(kSourceId, "{ [500,615) }");
2914
2915  // Complete the seek.
2916  demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
2917
2918
2919  // Append more data and make sure that the blocks for |seek_time2|
2920  // don't get removed.
2921  //
2922  // NOTE: The current GC algorithm tries to preserve the GOP at the
2923  //  current position as well as the last appended GOP. This is
2924  //  why there are 2 ranges in the expectations.
2925  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
2926  CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
2927}
2928
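// Verify that Remove() can be called before an initialization segment has
// been appended.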
2929TEST_F(ChunkDemuxerTest, RemoveBeforeInitSegment) {
2930  EXPECT_CALL(*this, DemuxerOpened());
2931  demuxer_->Initialize(
2932      &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);
2933
2934  EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO | HAS_VIDEO));
2935
2936  demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(0),
2937                   base::TimeDelta::FromMilliseconds(1));
2938}
2939
2940TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
2941  ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
2942  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
2943
2944  // Set the append window to [20,280).
2945  demuxer_->SetAppendWindowStart(kSourceId,
2946                                 base::TimeDelta::FromMilliseconds(20));
2947  demuxer_->SetAppendWindowEnd(kSourceId,
2948                               base::TimeDelta::FromMilliseconds(280));
2949
2950  // Append a cluster that starts before and ends after the append window.
2951  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2952                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
2953
2954  // Verify that GOPs that start outside the window are not included
2955  // in the buffer. Also verify that buffers that start inside the
2956  // window and extend beyond the end of the window are included.
2957  CheckExpectedRanges(kSourceId, "{ [120,300) }");
2958  CheckExpectedBuffers(stream, "120 150 180 210 240 270");
2959
2960  // Extend the append window to [20,650).
2961  demuxer_->SetAppendWindowEnd(kSourceId,
2962                               base::TimeDelta::FromMilliseconds(650));
2963
2964  // Append more data and verify that the added buffers start at the next
2965  // keyframe.
2966  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2967                            "360 390 420K 450 480 510 540K 570 600 630K");
2968  CheckExpectedRanges(kSourceId, "{ [120,300) [420,660) }");
2969}
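
// Illustrative sketch (not in the original suite) for the boundary case
// complementing the test above: a video cluster that lies entirely inside
// the append window should be buffered unmodified. The expected range
// string assumes the 30ms frame durations used above and is an
// illustration only, so the sketch is left DISABLED_.
TEST_F(ChunkDemuxerTest, DISABLED_AppendWindow_VideoInsideWindow_Sketch) {
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  demuxer_->SetAppendWindowStart(kSourceId,
                                 base::TimeDelta::FromMilliseconds(20));
  demuxer_->SetAppendWindowEnd(kSourceId,
                               base::TimeDelta::FromMilliseconds(280));

  // Every buffer starts inside the window, so nothing should be dropped.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "30K 60 90 120K 150 180 210 240K");
  CheckExpectedRanges(kSourceId, "{ [30,270) }");
  CheckExpectedBuffers(stream, "30 60 90 120 150 180 210 240");
}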

TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the append window to [20,280).
  demuxer_->SetAppendWindowStart(kSourceId,
                                 base::TimeDelta::FromMilliseconds(20));
  demuxer_->SetAppendWindowEnd(kSourceId,
                               base::TimeDelta::FromMilliseconds(280));

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");

  // Verify that frames that start outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are included.
  CheckExpectedRanges(kSourceId, "{ [30,300) }");
  CheckExpectedBuffers(stream, "30 60 90 120 150 180 210 240 270");

  // Extend the append window to [20,650).
  demuxer_->SetAppendWindowEnd(kSourceId,
                               base::TimeDelta::FromMilliseconds(650));

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
  CheckExpectedRanges(kSourceId, "{ [30,300) [360,660) }");
}
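
// Illustrative sketch (not in the original suite) for the window-start
// boundary in the audio case above: a buffer that starts exactly at the
// append window start should be included. The range string assumes the
// 30ms spacing used above and is an illustration only, so the sketch is
// left DISABLED_.
TEST_F(ChunkDemuxerTest, DISABLED_AppendWindow_AudioStartBoundary_Sketch) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the append window to [20,280).
  demuxer_->SetAppendWindowStart(kSourceId,
                                 base::TimeDelta::FromMilliseconds(20));
  demuxer_->SetAppendWindowEnd(kSourceId,
                               base::TimeDelta::FromMilliseconds(280));

  // The first buffer starts exactly at the window start and should be kept.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "20K 50K 80K");
  CheckExpectedRanges(kSourceId, "{ [20,110) }");
  CheckExpectedBuffers(stream, "20 50 80");
}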

TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  demuxer_->SetAppendWindowStart(kSourceId,
                                 base::TimeDelta::FromMilliseconds(20));
  demuxer_->SetAppendWindowEnd(kSourceId,
                               base::TimeDelta::FromMilliseconds(280));

  // Append a cluster that starts before and ends after the append
  // window.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");

  // Verify that text cues that start outside the window are not included
  // in the buffer. Also verify that cues that extend beyond the
  // window are included.
  CheckExpectedRanges(kSourceId, "{ [120,300) }");
  CheckExpectedBuffers(video_stream, "120 150 180 210 240 270");
  CheckExpectedBuffers(text_stream, "100 200");

  // Extend the append window to [20,650).
  demuxer_->SetAppendWindowEnd(kSourceId,
                               base::TimeDelta::FromMilliseconds(650));

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "360 390 420K 450 480 510 540K 570 600 630K");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
  CheckExpectedRanges(kSourceId, "{ [120,300) [420,660) }");

  // Seek to the new range and verify that the expected buffers are returned.
  Seek(base::TimeDelta::FromMilliseconds(420));
  CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600 630");
  CheckExpectedBuffers(text_stream, "400 500 600");
}

TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendGarbage();
  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
  demuxer_->StartWaitingForSeek(seek_time);
}

TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");

  CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
  CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
  CheckExpectedBuffers(text_stream, "0 100 200");

  // Remove the buffers that were added.
  demuxer_->Remove(kSourceId, base::TimeDelta(),
                   base::TimeDelta::FromMilliseconds(300));

  // Verify that all the appended data has been removed.
  CheckExpectedRanges(kSourceId, "{ }");

  // Append new buffers that are clearly different from the original
  // ones and verify that only the new buffers are returned.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "1K 21K 41K 61K 81K 101K 121K 141K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "1K 31 61 91 121K 151 181");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");

  Seek(base::TimeDelta());
  CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
  CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
  CheckExpectedBuffers(text_stream, "1 101 201");
}
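
// Illustrative sketch (not in the original suite): a partial Remove() that
// only covers the tail of the buffered data should leave buffers that start
// before the removal range readable. The expected buffer lists below are
// assumptions, so the sketch is left DISABLED_.
TEST_F(ChunkDemuxerTest, DISABLED_RemoveTail_AudioVideoText_Sketch) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180");
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");

  // Remove everything at or after 100ms.
  demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(100),
                   base::TimeDelta::FromMilliseconds(300));

  // Only buffers that start before 100ms are expected to remain.
  Seek(base::TimeDelta());
  CheckExpectedBuffers(audio_stream, "0 20 40 60 80");
  CheckExpectedBuffers(video_stream, "0 30 60 90");
  CheckExpectedBuffers(text_stream, "0");
}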

// Verifies that a Seek() completes even when no text cues are buffered at
// the seek point, and that cues after the seek position are returned once
// they are eventually appended.
TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
  bool seek_cb_was_called = false;
  demuxer_->StartWaitingForSeek(seek_time);
  demuxer_->Seek(seek_time,
                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
  message_loop_.RunUntilIdle();

  EXPECT_FALSE(seek_cb_was_called);

  bool text_read_done = false;
  text_stream->Read(base::Bind(&OnReadDone,
                               base::TimeDelta::FromMilliseconds(125),
                               &text_read_done));

  // Append audio & video data so the seek completes.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210");

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(seek_cb_was_called);
  EXPECT_FALSE(text_read_done);

  // Read some audio & video buffers to further verify seek completion.
  CheckExpectedBuffers(audio_stream, "120 140");
  CheckExpectedBuffers(video_stream, "120 150");

  EXPECT_FALSE(text_read_done);

  // Append text cues that start after the seek point and verify that
  // they are returned by Read() calls.
  AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(text_read_done);

  // NOTE: We start at 175 here because the buffer at 125 was already
  // returned to the pending read initiated above.
  CheckExpectedBuffers(text_stream, "175 225");

  // Verify that audio & video streams continue to return expected values.
  CheckExpectedBuffers(audio_stream, "160 180");
  CheckExpectedBuffers(video_stream, "180 210");
}
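
// Illustrative sketch (not in the original suite) mirroring the pending text
// read above for audio: a Read() issued before any media data is appended
// should stay pending and complete once a cluster covering the requested
// time arrives. OnReadDone is reused with an expected 0ms timestamp; this is
// a sketch rather than a vetted expectation, so it is left DISABLED_.
TEST_F(ChunkDemuxerTest, DISABLED_PendingAudioReadCompletesOnAppend_Sketch) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Issue a read before any media data has been appended; it should remain
  // pending.
  bool audio_read_done = false;
  audio_stream->Read(base::Bind(&OnReadDone,
                                base::TimeDelta::FromMilliseconds(0),
                                &audio_read_done));
  message_loop_.RunUntilIdle();
  EXPECT_FALSE(audio_read_done);

  // Appending the cluster that contains the requested time should complete
  // the pending read.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 20K 40K");
  message_loop_.RunUntilIdle();
  EXPECT_TRUE(audio_read_done);
}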

}  // namespace media