// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/ffmpeg_demuxer.h"

#include <algorithm>
#include <string>

#include "base/base64.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/sparse_histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_byteorder.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
#include "media/base/video_decoder_config.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
#include "media/filters/webvtt_util.h"
#include "media/webm/webm_crypto_helpers.h"

namespace media {

//
// FFmpegDemuxerStream
//
FFmpegDemuxerStream::FFmpegDemuxerStream(
    FFmpegDemuxer* demuxer,
    AVStream* stream)
    : demuxer_(demuxer),
      message_loop_(base::MessageLoopProxy::current()),
      stream_(stream),
      type_(UNKNOWN),
      end_of_stream_(false),
      last_packet_timestamp_(kNoTimestamp()),
      bitstream_converter_enabled_(false) {
  DCHECK(demuxer_);

  bool is_encrypted = false;

  // Determine our media format.
  switch (stream->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
      type_ = AUDIO;
      AVStreamToAudioDecoderConfig(stream, &audio_config_, true);
      is_encrypted = audio_config_.is_encrypted();
      break;
    case AVMEDIA_TYPE_VIDEO:
      type_ = VIDEO;
      AVStreamToVideoDecoderConfig(stream, &video_config_, true);
      is_encrypted = video_config_.is_encrypted();
      break;
    case AVMEDIA_TYPE_SUBTITLE:
      type_ = TEXT;
      break;
    default:
      NOTREACHED();
      break;
  }

  // Calculate the duration.
  duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration);

  if (stream_->codec->codec_id == AV_CODEC_ID_H264) {
    bitstream_converter_.reset(
        new FFmpegH264ToAnnexBBitstreamConverter(stream_->codec));
  }

  if (is_encrypted) {
    AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL,
                                         0);
    DCHECK(key);
    DCHECK(key->value);
    if (!key || !key->value)
      return;
    base::StringPiece base64_key_id(key->value);
    std::string enc_key_id;
    base::Base64Decode(base64_key_id, &enc_key_id);
    DCHECK(!enc_key_id.empty());
    if (enc_key_id.empty())
      return;

    encryption_key_id_.assign(enc_key_id);
    demuxer_->FireNeedKey(kWebMEncryptInitDataType, enc_key_id);
  }
}

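// Adds a demuxed packet to this stream's buffer queue. The packet is
// optionally run through the bitstream converter, wrapped in a DecoderBuffer
// along with any side data and decrypt configuration, and used to extend the
// stream's buffered time range.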
void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
  DCHECK(message_loop_->BelongsToCurrentThread());

  if (!demuxer_ || end_of_stream_) {
    NOTREACHED() << "Attempted to enqueue packet on a stopped stream";
    return;
  }

  // Convert the packet if there is a bitstream filter.
  if (packet->data && bitstream_converter_enabled_ &&
      !bitstream_converter_->ConvertPacket(packet.get())) {
    LOG(ERROR) << "Format conversion failed.";
  }

  // Get side data if any. For now, the only type of side_data is VP8 Alpha.
  // We keep this generic so that other side_data types can be handled the
  // same way in the future.
  av_packet_split_side_data(packet.get());
  scoped_refptr<DecoderBuffer> buffer;

  if (type() == DemuxerStream::TEXT) {
    int id_size = 0;
    uint8* id_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_WEBVTT_IDENTIFIER,
        &id_size);

    int settings_size = 0;
    uint8* settings_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_WEBVTT_SETTINGS,
        &settings_size);

    std::vector<uint8> side_data;
    MakeSideData(id_data, id_data + id_size,
                 settings_data, settings_data + settings_size,
                 &side_data);

    buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
                                     side_data.data(), side_data.size());
  } else {
    int side_data_size = 0;
    uint8* side_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
        &side_data_size);

    // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
    // reference inner memory of FFmpeg.  As such we should transfer the packet
    // into memory we control.
    if (side_data_size > 0) {
      buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
                                       side_data, side_data_size);
    } else {
      buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size);
    }

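    // The skip samples side data carries a leading skip count followed by a
    // trailing discard count, each a 32-bit little-endian sample count. Only
    // the trailing discard count (at kSkipSamplesOffset) is needed here to
    // compute the discard padding.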
    int skip_samples_size = 0;
    uint8* skip_samples = av_packet_get_side_data(packet.get(),
                                                  AV_PKT_DATA_SKIP_SAMPLES,
                                                  &skip_samples_size);
    const int kSkipSamplesValidSize = 10;
    const int kSkipSamplesOffset = 4;
    if (skip_samples_size >= kSkipSamplesValidSize) {
      int discard_padding_samples = base::ByteSwapToLE32(
          *(reinterpret_cast<const uint32*>(skip_samples +
                                            kSkipSamplesOffset)));
      // TODO(vigneshv): Change decoder buffer to use number of samples so that
      // this conversion can be avoided.
      buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
          discard_padding_samples * 1000000.0 /
          audio_decoder_config().samples_per_second()));
    }
  }

  if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
      (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
    scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig(
        packet->data, packet->size,
        reinterpret_cast<const uint8*>(encryption_key_id_.data()),
        encryption_key_id_.size()));
    if (!config)
      LOG(ERROR) << "Creation of DecryptConfig failed.";
    buffer->set_decrypt_config(config.Pass());
  }

  buffer->set_timestamp(ConvertStreamTimestamp(
      stream_->time_base, packet->pts));
  buffer->set_duration(ConvertStreamTimestamp(
      stream_->time_base, packet->duration));
  if (buffer->timestamp() != kNoTimestamp() &&
      last_packet_timestamp_ != kNoTimestamp() &&
      last_packet_timestamp_ < buffer->timestamp()) {
    buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
    demuxer_->NotifyBufferingChanged();
  }
  last_packet_timestamp_ = buffer->timestamp();

  buffer_queue_.Push(buffer);
  SatisfyPendingRead();
}

void FFmpegDemuxerStream::SetEndOfStream() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  end_of_stream_ = true;
  SatisfyPendingRead();
}

void FFmpegDemuxerStream::FlushBuffers() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(read_cb_.is_null()) << "There should be no pending read";
  buffer_queue_.Clear();
  end_of_stream_ = false;
  last_packet_timestamp_ = kNoTimestamp();
}

void FFmpegDemuxerStream::Stop() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  buffer_queue_.Clear();
  if (!read_cb_.is_null()) {
    base::ResetAndReturn(&read_cb_).Run(
        DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
  }
  demuxer_ = NULL;
  stream_ = NULL;
  end_of_stream_ = true;
}

base::TimeDelta FFmpegDemuxerStream::duration() {
  return duration_;
}

DemuxerStream::Type FFmpegDemuxerStream::type() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  return type_;
}

void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
  read_cb_ = BindToCurrentLoop(read_cb);

  // Don't accept any additional reads if we've been told to stop.
  // The |demuxer_| may have been destroyed in the pipeline thread.
  //
  // TODO(scherkus): it would be cleaner to reply with an error message.
  if (!demuxer_) {
    base::ResetAndReturn(&read_cb_).Run(
        DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
    return;
  }

  SatisfyPendingRead();
}

void FFmpegDemuxerStream::EnableBitstreamConverter() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  CHECK(bitstream_converter_.get());
  bitstream_converter_enabled_ = true;
}

AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  CHECK_EQ(type_, AUDIO);
  return audio_config_;
}

VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  CHECK_EQ(type_, VIDEO);
  return video_config_;
}

FFmpegDemuxerStream::~FFmpegDemuxerStream() {
  DCHECK(!demuxer_);
  DCHECK(read_cb_.is_null());
  DCHECK(buffer_queue_.IsEmpty());
}

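// Returns the stream's current decode position as a TimeDelta; used to
// estimate the duration when the container does not report one.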
base::TimeDelta FFmpegDemuxerStream::GetElapsedTime() const {
  return ConvertStreamTimestamp(stream_->time_base, stream_->cur_dts);
}

Ranges<base::TimeDelta> FFmpegDemuxerStream::GetBufferedRanges() const {
  return buffered_ranges_;
}

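// Completes a pending Read() with the next queued buffer (or an end-of-stream
// buffer), then asks the demuxer for more data if there is spare capacity.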
void FFmpegDemuxerStream::SatisfyPendingRead() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!read_cb_.is_null()) {
    if (!buffer_queue_.IsEmpty()) {
      base::ResetAndReturn(&read_cb_).Run(
          DemuxerStream::kOk, buffer_queue_.Pop());
    } else if (end_of_stream_) {
      base::ResetAndReturn(&read_cb_).Run(
          DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
    }
  }

  // Have capacity? Ask for more!
  if (HasAvailableCapacity() && !end_of_stream_) {
    demuxer_->NotifyCapacityAvailable();
  }
}

bool FFmpegDemuxerStream::HasAvailableCapacity() {
  // TODO(scherkus): Remove early return and reenable time-based capacity
  // after our data sources support canceling/concurrent reads, see
  // http://crbug.com/165762 for details.
  return !read_cb_.is_null();

  // Try to have one second's worth of encoded data per stream.
  const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1);
  return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
}

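// Maps the FFmpeg stream disposition flags onto a text track kind.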
TextKind FFmpegDemuxerStream::GetTextKind() const {
  DCHECK_EQ(type_, DemuxerStream::TEXT);

  if (stream_->disposition & AV_DISPOSITION_CAPTIONS)
    return kTextCaptions;

  if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS)
    return kTextDescriptions;

  if (stream_->disposition & AV_DISPOSITION_METADATA)
    return kTextMetadata;

  return kTextSubtitles;
}

std::string FFmpegDemuxerStream::GetMetadata(const char* key) const {
  const AVDictionaryEntry* entry =
      av_dict_get(stream_->metadata, key, NULL, 0);
  return (entry == NULL || entry->value == NULL) ? "" : entry->value;
}

// static
base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
    const AVRational& time_base, int64 timestamp) {
  if (timestamp == static_cast<int64>(AV_NOPTS_VALUE))
    return kNoTimestamp();

  return ConvertFromTimeBase(time_base, timestamp);
}

//
// FFmpegDemuxer
//
FFmpegDemuxer::FFmpegDemuxer(
    const scoped_refptr<base::MessageLoopProxy>& message_loop,
    DataSource* data_source,
    const NeedKeyCB& need_key_cb,
    const scoped_refptr<MediaLog>& media_log)
    : host_(NULL),
      message_loop_(message_loop),
      weak_factory_(this),
      blocking_thread_("FFmpegDemuxer"),
      pending_read_(false),
      pending_seek_(false),
      data_source_(data_source),
      media_log_(media_log),
      bitrate_(0),
      start_time_(kNoTimestamp()),
      audio_disabled_(false),
      text_enabled_(false),
      duration_known_(false),
      url_protocol_(data_source, BindToLoop(message_loop_, base::Bind(
          &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))),
      need_key_cb_(need_key_cb) {
  DCHECK(message_loop_.get());
  DCHECK(data_source_);
}

FFmpegDemuxer::~FFmpegDemuxer() {}

void FFmpegDemuxer::Stop(const base::Closure& callback) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  url_protocol_.Abort();
  data_source_->Stop(BindToCurrentLoop(base::Bind(
      &FFmpegDemuxer::OnDataSourceStopped, weak_this_,
      BindToCurrentLoop(callback))));
  data_source_ = NULL;
}

void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  CHECK(!pending_seek_);

  // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|,
  // otherwise we can end up waiting for a pre-seek read to complete even though
  // we know we're going to drop it on the floor.

  // Always seek to a timestamp less than or equal to the desired timestamp.
  int flags = AVSEEK_FLAG_BACKWARD;

  // Passing -1 as our stream index lets FFmpeg pick a default stream.  FFmpeg
  // will attempt to use the lowest-index video stream, if present, followed by
  // the lowest-index audio stream.
  pending_seek_ = true;
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&av_seek_frame,
                 glue_->format_context(),
                 -1,
                 time.InMicroseconds(),
                 flags),
      base::Bind(&FFmpegDemuxer::OnSeekFrameDone, weak_this_, cb));
}

void FFmpegDemuxer::OnAudioRendererDisabled() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  audio_disabled_ = true;
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->type() == DemuxerStream::AUDIO) {
      (*iter)->Stop();
    }
  }
}

void FFmpegDemuxer::Initialize(DemuxerHost* host,
                               const PipelineStatusCB& status_cb,
                               bool enable_text_tracks) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  host_ = host;
  weak_this_ = weak_factory_.GetWeakPtr();
  text_enabled_ = enable_text_tracks;

  // TODO(scherkus): DataSource should have a host by this point,
  // see http://crbug.com/122071
  data_source_->set_host(host);

  glue_.reset(new FFmpegGlue(&url_protocol_));
  AVFormatContext* format_context = glue_->format_context();

  // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we
  // don't use.  FFmpeg will only read ID3v1 tags if no other metadata is
  // available, so add a metadata entry to ensure some is always present.
  av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0);

  // Open the AVFormatContext using our glue layer.
  CHECK(blocking_thread_.Start());
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&FFmpegGlue::OpenContext, base::Unretained(glue_.get())),
      base::Bind(&FFmpegDemuxer::OnOpenContextDone, weak_this_, status_cb));
}

DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  return GetFFmpegStream(type);
}

FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream(
    DemuxerStream::Type type) const {
  StreamVector::const_iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->type() == type) {
      return *iter;
    }
  }
  return NULL;
}

base::TimeDelta FFmpegDemuxer::GetStartTime() const {
  DCHECK(message_loop_->BelongsToCurrentThread());
  return start_time_;
}

void FFmpegDemuxer::AddTextStreams() {
  DCHECK(message_loop_->BelongsToCurrentThread());

  for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) {
    FFmpegDemuxerStream* stream = streams_[idx];
    if (stream == NULL || stream->type() != DemuxerStream::TEXT)
      continue;

    TextKind kind = stream->GetTextKind();
    std::string title = stream->GetMetadata("title");
    std::string language = stream->GetMetadata("language");

    // TODO: Implement "id" metadata in FFMPEG.
    // See: http://crbug.com/323183
    host_->AddTextStream(stream, TextTrackConfig(kind, title, language,
        std::string()));
  }
}

// Helper for calculating the bitrate of the media based on information stored
// in |format_context| or failing that the size and duration of the media.
//
// Returns 0 if a bitrate could not be determined.
static int CalculateBitrate(
    AVFormatContext* format_context,
    const base::TimeDelta& duration,
    int64 filesize_in_bytes) {
  // If there is a bitrate set on the container, use it.
  if (format_context->bit_rate > 0)
    return format_context->bit_rate;

  // Then try to sum the bitrates individually per stream.
  int bitrate = 0;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVCodecContext* codec_context = format_context->streams[i]->codec;
    bitrate += codec_context->bit_rate;
  }
  if (bitrate > 0)
    return bitrate;

  // See if we can approximate the bitrate as long as we have a filesize and
  // valid duration.
  if (duration.InMicroseconds() <= 0 ||
      duration == kInfiniteDuration() ||
      filesize_in_bytes == 0) {
    return 0;
  }

  // Do math in floating point as we'd overflow an int64 if the filesize was
  // larger than ~1073GB.
  double bytes = filesize_in_bytes;
  double duration_us = duration.InMicroseconds();
  return bytes * 8000000.0 / duration_us;
}

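// Called on the demuxer thread once FFmpegGlue::OpenContext() has run on the
// blocking thread; kicks off avformat_find_stream_info() if the open
// succeeded.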
void FFmpegDemuxer::OnOpenContextDone(const PipelineStatusCB& status_cb,
                                      bool result) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!blocking_thread_.IsRunning()) {
    status_cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (!result) {
    status_cb.Run(DEMUXER_ERROR_COULD_NOT_OPEN);
    return;
  }

  // Fully initialize AVFormatContext by parsing the stream a little.
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&avformat_find_stream_info,
                 glue_->format_context(),
                 static_cast<AVDictionary**>(NULL)),
      base::Bind(&FFmpegDemuxer::OnFindStreamInfoDone, weak_this_, status_cb));
}

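// Called once avformat_find_stream_info() completes. Builds a demuxer stream
// for each supported AVStream, computes the overall duration and bitrate,
// and reports media details to the media log before signaling PIPELINE_OK.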
void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
                                         int result) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!blocking_thread_.IsRunning() || !data_source_) {
    status_cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (result < 0) {
    status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE);
    return;
  }

  // Create demuxer stream entries for each possible AVStream. Each stream
  // is examined to determine if it is supported or not (is the codec enabled
  // for it in this release?). Unsupported streams are skipped, allowing for
  // partial playback. At least one audio or video stream must be playable.
  AVFormatContext* format_context = glue_->format_context();
  streams_.resize(format_context->nb_streams);

  AVStream* audio_stream = NULL;
  AudioDecoderConfig audio_config;

  AVStream* video_stream = NULL;
  VideoDecoderConfig video_config;

  base::TimeDelta max_duration;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVStream* stream = format_context->streams[i];
    AVCodecContext* codec_context = stream->codec;
    AVMediaType codec_type = codec_context->codec_type;

    if (codec_type == AVMEDIA_TYPE_AUDIO) {
      if (audio_stream)
        continue;

      // Log the codec detected, whether it is supported or not.
      UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec",
                                  codec_context->codec_id);
      // Ensure the codec is supported. IsValidConfig() also checks that the
      // channel layout and sample format are valid.
      AVStreamToAudioDecoderConfig(stream, &audio_config, false);
      if (!audio_config.IsValidConfig())
        continue;
      audio_stream = stream;
    } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
      if (video_stream)
        continue;

      // Log the codec detected, whether it is supported or not.
      UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec",
                                  codec_context->codec_id);
      // Ensure the codec is supported. IsValidConfig() also checks that the
      // frame size and visible size are valid.
      AVStreamToVideoDecoderConfig(stream, &video_config, false);

      if (!video_config.IsValidConfig())
        continue;
      video_stream = stream;
    } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
      if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) {
        continue;
      }
    } else {
      continue;
    }

    streams_[i] = new FFmpegDemuxerStream(this, stream);
    max_duration = std::max(max_duration, streams_[i]->duration());

    if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) {
      const base::TimeDelta first_dts = ConvertFromTimeBase(
          stream->time_base, stream->first_dts);
      if (start_time_ == kNoTimestamp() || first_dts < start_time_)
        start_time_ = first_dts;
    }
  }

  if (!audio_stream && !video_stream) {
    status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
    return;
  }

  if (text_enabled_)
    AddTextStreams();

  if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
    // If there is a duration value in the container use that to find the
    // maximum between it and the duration from A/V streams.
    const AVRational av_time_base = {1, AV_TIME_BASE};
    max_duration =
        std::max(max_duration,
                 ConvertFromTimeBase(av_time_base, format_context->duration));
  } else {
    // The duration is unknown, in which case this is likely a live stream.
    max_duration = kInfiniteDuration();
  }

  // Some demuxers, like WAV, do not put timestamps on their frames. We
  // assume the start time is 0.
  if (start_time_ == kNoTimestamp())
    start_time_ = base::TimeDelta();

  // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
  // generation so we always get timestamps, see http://crbug.com/169570
  if (strcmp(format_context->iformat->name, "avi") == 0)
    format_context->flags |= AVFMT_FLAG_GENPTS;

  // Good to go: set the duration and bitrate and notify we're done
  // initializing.
  host_->SetDuration(max_duration);
  duration_known_ = (max_duration != kInfiniteDuration());

  int64 filesize_in_bytes = 0;
  url_protocol_.GetSize(&filesize_in_bytes);
  bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
  if (bitrate_ > 0)
    data_source_->SetBitrate(bitrate_);

  // Audio logging
  if (audio_stream) {
    AVCodecContext* audio_codec = audio_stream->codec;
    media_log_->SetBooleanProperty("found_audio_stream", true);

    SampleFormat sample_format = audio_config.sample_format();
    std::string sample_name = SampleFormatToString(sample_format);

    media_log_->SetStringProperty("audio_sample_format", sample_name);

    AVCodec* codec = avcodec_find_decoder(audio_codec->codec_id);
    if (codec) {
      media_log_->SetStringProperty("audio_codec_name", codec->name);
    }

    media_log_->SetIntegerProperty("audio_channels_count",
                                   audio_codec->channels);
    media_log_->SetIntegerProperty("audio_samples_per_second",
                                   audio_config.samples_per_second());
  } else {
    media_log_->SetBooleanProperty("found_audio_stream", false);
  }

  // Video logging
  if (video_stream) {
    AVCodecContext* video_codec = video_stream->codec;
    media_log_->SetBooleanProperty("found_video_stream", true);

    AVCodec* codec = avcodec_find_decoder(video_codec->codec_id);
    if (codec) {
      media_log_->SetStringProperty("video_codec_name", codec->name);
    }

    media_log_->SetIntegerProperty("width", video_codec->width);
    media_log_->SetIntegerProperty("height", video_codec->height);
    media_log_->SetIntegerProperty("coded_width",
                                   video_codec->coded_width);
    media_log_->SetIntegerProperty("coded_height",
                                   video_codec->coded_height);
    media_log_->SetStringProperty(
        "time_base",
        base::StringPrintf("%d/%d",
                           video_codec->time_base.num,
                           video_codec->time_base.den));
    media_log_->SetStringProperty(
        "video_format", VideoFrame::FormatToString(video_config.format()));
    media_log_->SetBooleanProperty("video_is_encrypted",
                                   video_config.is_encrypted());
  } else {
    media_log_->SetBooleanProperty("found_video_stream", false);
  }

  media_log_->SetDoubleProperty("max_duration", max_duration.InSecondsF());
  media_log_->SetDoubleProperty("start_time", start_time_.InSecondsF());
  media_log_->SetIntegerProperty("bitrate", bitrate_);

  status_cb.Run(PIPELINE_OK);
}

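// Called once av_seek_frame() completes on the blocking thread. Flushes all
// stream buffers and resumes demuxing before reporting the seek result.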
void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  CHECK(pending_seek_);
  pending_seek_ = false;

  if (!blocking_thread_.IsRunning()) {
    cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (result < 0) {
    // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message from
    // being captured from stdout and contaminating test output.
    // TODO(scherkus): Implement this properly and signal error (BUG=23447).
    VLOG(1) << "Not implemented";
  }

  // Tell streams to flush buffers due to seeking.
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter)
      (*iter)->FlushBuffers();
  }

  // Resume reading until capacity.
  ReadFrameIfNeeded();

  // Notify we're finished seeking.
  cb.Run(PIPELINE_OK);
}

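// Posts an av_read_frame() call to the blocking thread if any stream still
// has capacity and no read or seek is already in flight.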
void FFmpegDemuxer::ReadFrameIfNeeded() {
  DCHECK(message_loop_->BelongsToCurrentThread());

  // Make sure we have work to do before reading.
  if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() ||
      pending_read_ || pending_seek_) {
    return;
  }

  // Allocate and read an AVPacket from the media. Save |packet_ptr| since
  // evaluation order of packet.get() and base::Passed(&packet) is
  // undefined.
  ScopedAVPacket packet(new AVPacket());
  AVPacket* packet_ptr = packet.get();

  pending_read_ = true;
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&av_read_frame, glue_->format_context(), packet_ptr),
      base::Bind(
          &FFmpegDemuxer::OnReadFrameDone, weak_this_, base::Passed(&packet)));
}

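// Handles the result of av_read_frame(): on failure the duration is
// finalized (if it was unknown) and the streams are marked as ended; on
// success the packet is routed to the matching demuxer stream and another
// read is scheduled.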
void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(pending_read_);
  pending_read_ = false;

  if (!blocking_thread_.IsRunning() || pending_seek_) {
    return;
  }

  if (result < 0) {
    // Update the duration based on the audio stream if it was previously
    // unknown. See http://crbug.com/86830.
    if (!duration_known_) {
      // Search the streams for an audio stream.
      for (StreamVector::iterator iter = streams_.begin();
           iter != streams_.end();
           ++iter) {
        if (*iter && (*iter)->type() == DemuxerStream::AUDIO) {
          base::TimeDelta duration = (*iter)->GetElapsedTime();
          if (duration != kNoTimestamp() && duration > base::TimeDelta()) {
            host_->SetDuration(duration);
            duration_known_ = true;
          }
          break;
        }
      }
    }
    // If we have reached the end of stream, tell the downstream filters about
    // the event.
    StreamHasEnded();
    return;
  }

  // Queue the packet with the appropriate stream.
  DCHECK_GE(packet->stream_index, 0);
  DCHECK_LT(packet->stream_index, static_cast<int>(streams_.size()));

  // Defend against ffmpeg giving us a bad stream index.
  if (packet->stream_index >= 0 &&
      packet->stream_index < static_cast<int>(streams_.size()) &&
      streams_[packet->stream_index] &&
      (!audio_disabled_ ||
       streams_[packet->stream_index]->type() != DemuxerStream::AUDIO)) {

    // TODO(scherkus): Fix demuxing upstream to never return packets w/o data
    // when av_read_frame() returns success code. See bug comment for ideas:
    //
    // https://code.google.com/p/chromium/issues/detail?id=169133#c10
    if (!packet->data) {
      ScopedAVPacket new_packet(new AVPacket());
      av_new_packet(new_packet.get(), 0);

      new_packet->pts = packet->pts;
      new_packet->dts = packet->dts;
      new_packet->pos = packet->pos;
      new_packet->duration = packet->duration;
      new_packet->convergence_duration = packet->convergence_duration;
      new_packet->flags = packet->flags;
      new_packet->stream_index = packet->stream_index;

      packet.swap(new_packet);
    }

    // Special case for opus in ogg.  FFmpeg is pre-trimming the codec delay
    // from the packet timestamp.  Chrome expects to handle this itself inside
    // the decoder, so shift timestamps by the delay in this case.
    // TODO(dalecurtis): Try to get fixed upstream.  See http://crbug.com/328207
    if (strcmp(glue_->format_context()->iformat->name, "ogg") == 0) {
      const AVCodecContext* codec_context =
          glue_->format_context()->streams[packet->stream_index]->codec;
      if (codec_context->codec_id == AV_CODEC_ID_OPUS &&
          codec_context->delay > 0) {
        packet->pts += codec_context->delay;
      }
    }

    FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index];
    demuxer_stream->EnqueuePacket(packet.Pass());
  }

  // Keep reading until we've reached capacity.
  ReadFrameIfNeeded();
}

void FFmpegDemuxer::OnDataSourceStopped(const base::Closure& callback) {
  // This will block until all tasks complete. Note that after this returns it's
  // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this
  // thread. Each of the reply task methods must check whether we've stopped the
  // thread and drop their results on the floor.
  DCHECK(message_loop_->BelongsToCurrentThread());
  blocking_thread_.Stop();

  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter)
      (*iter)->Stop();
  }

  callback.Run();
}

bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->HasAvailableCapacity()) {
      return true;
    }
  }
  return false;
}

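// Signals end of stream to every active stream, skipping audio streams when
// audio has been disabled.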
void FFmpegDemuxer::StreamHasEnded() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (!*iter ||
        (audio_disabled_ && (*iter)->type() == DemuxerStream::AUDIO)) {
      continue;
    }
    (*iter)->SetEndOfStream();
  }
}

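// Forwards an encryption key request from a stream to the need-key callback,
// converting the key ID string into the byte vector the callback expects.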
void FFmpegDemuxer::FireNeedKey(const std::string& init_data_type,
                                const std::string& encryption_key_id) {
  std::vector<uint8> key_id_local(encryption_key_id.begin(),
                                  encryption_key_id.end());
  need_key_cb_.Run(init_data_type, key_id_local);
}

void FFmpegDemuxer::NotifyCapacityAvailable() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  ReadFrameIfNeeded();
}

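// Recomputes the buffered time ranges as the intersection of the audio and
// video streams' ranges (or whichever stream exists) and reports them to the
// host.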
void FFmpegDemuxer::NotifyBufferingChanged() {
  DCHECK(message_loop_->BelongsToCurrentThread());
  Ranges<base::TimeDelta> buffered;
  FFmpegDemuxerStream* audio =
      audio_disabled_ ? NULL : GetFFmpegStream(DemuxerStream::AUDIO);
  FFmpegDemuxerStream* video = GetFFmpegStream(DemuxerStream::VIDEO);
  if (audio && video) {
    buffered = audio->GetBufferedRanges().IntersectionWith(
        video->GetBufferedRanges());
  } else if (audio) {
    buffered = audio->GetBufferedRanges();
  } else if (video) {
    buffered = video->GetBufferedRanges();
  }
  for (size_t i = 0; i < buffered.size(); ++i)
    host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i));
}

void FFmpegDemuxer::OnDataSourceError() {
  host_->OnDemuxerError(PIPELINE_ERROR_READ);
}

}  // namespace media